Learn how to optimize Linux network performance using sysctl kernel parameters, TCP BBR congestion control, and advanced buffer tuning. This guide covers baseline testing, monitoring, and production-grade configurations for high-throughput servers.
Prerequisites
- Root or sudo access
- Linux kernel 4.9+ for BBR support
- Network testing tools (iperf3)
- Basic understanding of TCP/IP concepts
What this solves
Linux network performance can be significantly improved through kernel parameter tuning and TCP congestion control optimization. This tutorial helps you configure sysctl parameters, enable TCP BBR congestion control, optimize network buffers, and monitor network performance metrics for high-throughput servers and bandwidth-intensive applications.
Prerequisites and system requirements
You need root access to modify kernel parameters and network stack configuration. Modern kernels (4.9+) support BBR congestion control, while older kernels use CUBIC by default.
Install network performance tools
Install essential tools for network performance testing and monitoring.
sudo apt update
sudo apt install -y iperf3 netstat-nat iproute2 ethtool procps net-tools
Network performance baseline testing
Check current network configuration
Document your current network stack configuration before making changes.
cat /proc/sys/net/ipv4/tcp_congestion_control
cat /proc/sys/net/core/rmem_max
cat /proc/sys/net/core/wmem_max
ss -i | head -20
Test baseline network performance
Run iperf3 tests to establish baseline performance metrics before optimization.
# On the server (replace with your server IP)
iperf3 -s -p 5201
On the client (run from another machine)
iperf3 -c 203.0.113.10 -p 5201 -t 30 -P 4
Monitor network interface statistics
Check network interface statistics and identify potential bottlenecks.
ethtool -S eth0 | grep -E '(drop|error|fifo)'
cat /proc/net/dev
cat /proc/net/sockstat
TCP congestion control optimization
Check available congestion control algorithms
List available TCP congestion control algorithms supported by your kernel.
cat /proc/sys/net/ipv4/tcp_available_congestion_control
modprobe tcp_bbr
echo 'tcp_bbr' | sudo tee -a /etc/modules-load.d/modules.conf
Enable TCP BBR congestion control
Configure TCP BBR for better bandwidth utilization and reduced latency.
# TCP BBR congestion control
net.core.default_qdisc = fq
net.ipv4.tcp_congestion_control = bbr
Enable BBR slow start after idle
net.ipv4.tcp_slow_start_after_idle = 0
Configure advanced TCP parameters
Optimize TCP window scaling, timestamps, and selective acknowledgments.
# TCP window scaling and timestamps
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_sack = 1
TCP keepalive settings
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 60
net.ipv4.tcp_keepalive_probes = 3
Network buffer and queue optimization
Optimize socket buffer sizes
Configure kernel socket buffers for high-throughput network operations.
# Socket buffer sizes (16MB max)
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.rmem_default = 262144
net.core.wmem_default = 262144
TCP socket buffer sizes (min, default, max)
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
Configure network device queues
Optimize network device receive and transmit queues for better performance.
# Network device queue settings
net.core.netdev_max_backlog = 5000
net.core.netdev_budget = 600
TCP connection queue sizes
net.core.somaxconn = 1024
net.ipv4.tcp_max_syn_backlog = 2048
Enable TCP fast open and optimize connection handling
Configure TCP fast open and connection reuse for reduced latency.
# TCP Fast Open (client and server)
net.ipv4.tcp_fastopen = 3
TCP connection reuse and recycling
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 10
TCP memory pressure settings
net.ipv4.tcp_mem = 786432 1048576 16777216
net.ipv4.tcp_max_tw_buckets = 360000
High-throughput server tuning
Configure TCP congestion window and pacing
Optimize TCP congestion window initialization and pacing for high-bandwidth networks.
# Initial congestion window: Linux has no tcp_init_cwnd sysctl — set it per route instead
sudo ip route change default via <gateway-ip> dev eth0 initcwnd 10
TCP pacing requires the fq qdisc (keep this consistent with the BBR setting above)
net.core.default_qdisc = fq
TCP no metrics save
net.ipv4.tcp_no_metrics_save = 1
Optimize for high-concurrent connections
Configure kernel parameters for servers handling many simultaneous connections.
# IP port range for outbound connections
net.ipv4.ip_local_port_range = 1024 65535
File descriptor limits
fs.file-max = 2097152
Network security and performance
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_rfc1337 = 1
Apply sysctl configuration
Load the new kernel parameters and verify they are active.
sudo sysctl -p /etc/sysctl.d/99-network-performance.conf
sudo sysctl net.ipv4.tcp_congestion_control
sudo sysctl net.core.default_qdisc
Configure network interface ring buffers
Optimize network interface ring buffer sizes for your network card.
# Check current ring buffer sizes
ethtool -g eth0
Increase ring buffer sizes (adjust based on your NIC)
sudo ethtool -G eth0 rx 4096 tx 4096
Make permanent by adding to network configuration
echo 'ethtool -G eth0 rx 4096 tx 4096' | sudo tee -a /etc/rc.local
Monitoring network performance metrics
Monitor TCP congestion control effectiveness
Check TCP congestion control statistics and connection metrics.
ss -i | grep -E '(cubic|bbr|reno)'
cat /proc/net/netstat | grep TcpExt
nstat -a | grep -E '(Tcp|Ip)' | head -20
Create network monitoring script
Set up automated monitoring of key network performance indicators.
#!/bin/bash
Network performance monitoring script
echo "=== Network Performance Report $(date) ==="
echo "TCP Congestion Control:"
cat /proc/sys/net/ipv4/tcp_congestion_control
echo "Socket Statistics:"
ss -s
echo "TCP Connection States:"
ss -tan state established | wc -l
ss -tan state time-wait | wc -l
echo "Network Interface Statistics:"
ethtool -S eth0 | grep -E '(rx_bytes|tx_bytes|rx_dropped|tx_dropped)'
echo "TCP Retransmissions:"
nstat TcpRetransSegs | tail -1
echo "Buffer Usage:"
cat /proc/net/sockstat
Make monitoring script executable
Set proper permissions and test the monitoring script.
sudo chmod 755 /usr/local/bin/network-monitor.sh
sudo /usr/local/bin/network-monitor.sh
Verify your setup
Test the network optimizations and verify improved performance.
# Verify TCP BBR is active
cat /proc/sys/net/ipv4/tcp_congestion_control
Check buffer sizes
sysctl net.core.rmem_max net.core.wmem_max
Test network performance (run iperf3 server first)
iperf3 -c 203.0.113.10 -p 5201 -t 30 -P 4
Monitor active connections
ss -tan state established | head -10
Check for network errors
ethtool -S eth0 | grep -E '(error|drop)' | grep -v ': 0'
Performance testing and validation
Run comprehensive performance tests
Compare before and after performance using multiple test scenarios.
# Single connection test
iperf3 -c 203.0.113.10 -t 30
Multiple parallel connections
iperf3 -c 203.0.113.10 -t 30 -P 8
UDP bandwidth test
iperf3 -c 203.0.113.10 -u -b 1000M -t 30
Reverse test (server sends to client)
iperf3 -c 203.0.113.10 -R -t 30
For advanced performance monitoring integration, see our guide on configuring Linux performance monitoring with collectd and InfluxDB. For complementary I/O optimizations, check our tutorial on optimizing Linux I/O performance.
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| BBR not available | Kernel version too old | Upgrade to kernel 4.9+ or use CUBIC with optimized parameters |
| No performance improvement | Network not bandwidth-limited | Test on high-latency or congested networks |
| Connection timeouts | Aggressive timeout settings | Increase tcp_fin_timeout and keepalive values |
| High memory usage | Large buffer sizes | Reduce rmem_max and wmem_max values by 50% |
| Packet drops | Small ring buffers | Increase NIC ring buffer sizes with ethtool -G |
| sysctl changes not persistent | Configuration not in sysctl.d | Ensure config is in /etc/sysctl.d/ and run sysctl -p |
Next steps
- Optimize HAProxy performance with connection pooling and advanced load balancing
- Configure network bonding and VLAN tagging for high availability
- Set up Linux network traffic shaping with tc and QoS
- Configure NGINX reverse proxy with advanced caching and load balancing
- Implement Linux network security with iptables and connection tracking
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
# Network performance tuning installer: installs tools, enables TCP BBR,
# and applies sysctl optimizations. Run as root.
set -euo pipefail

# Terminal colors for log output (constants, so mark them readonly)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color

# Paths of the configuration files this script generates
readonly SYSCTL_FILE="/etc/sysctl.d/99-network-performance.conf"
readonly MODULES_FILE="/etc/modules-load.d/network-modules.conf"
# Print usage information on stdout and terminate with status 1.
usage() {
  printf '%s\n' \
    "Usage: $0 [--baseline-only] [--help]" \
    "" \
    "Options:" \
    " --baseline-only Only install tools and show baseline, don't apply optimizations" \
    " --help Show this help message"
  exit 1
}
# Parse command-line arguments; only --baseline-only and --help are accepted.
BASELINE_ONLY=false
while [[ $# -gt 0 ]]; do
  opt=$1
  case "$opt" in
    --baseline-only)
      BASELINE_ONLY=true
      ;;
    --help)
      usage
      ;;
    *)
      echo -e "${RED}Unknown option: $opt${NC}"
      usage
      ;;
  esac
  shift
done
# Logging helpers. Informational output goes to stdout; warnings and errors
# go to stderr so they are not mixed into captured/piped stdout.
log() {
  echo -e "${GREEN}[INFO]${NC} $1"
}
warn() {
  echo -e "${YELLOW}[WARN]${NC} $1" >&2
}
error() {
  echo -e "${RED}[ERROR]${NC} $1" >&2
}
# Rollback handler installed on ERR: restores the configuration backups
# created before modification so a failed run leaves the system unchanged.
cleanup() {
  local rc=$?  # status of the command that triggered the trap
  if [ "$rc" -ne 0 ]; then
    error "Installation failed. Rolling back changes..."
    # Explicit ifs (not `[ -f ] && mv`) so a missing backup does not leave
    # the handler itself returning a non-zero status.
    if [ -f "${SYSCTL_FILE}.backup" ]; then
      mv "${SYSCTL_FILE}.backup" "$SYSCTL_FILE"
    fi
    if [ -f "${MODULES_FILE}.backup" ]; then
      mv "${MODULES_FILE}.backup" "$MODULES_FILE"
    fi
  fi
}
trap cleanup ERR
# Abort unless the script is running with root privileges.
check_root() {
  (( EUID == 0 )) || {
    error "This script must be run as root or with sudo"
    exit 1
  }
}
# Identify the distribution from /etc/os-release and select the matching
# package-manager commands (sets PKG_MGR, PKG_INSTALL, PKG_UPDATE).
detect_distro() {
  # Guard clause: bail out early when the os-release file is missing.
  if [ ! -f /etc/os-release ]; then
    error "Cannot detect distribution. /etc/os-release not found."
    exit 1
  fi
  . /etc/os-release
  case "$ID" in
    ubuntu|debian)
      PKG_MGR="apt"
      PKG_INSTALL="apt install -y"
      PKG_UPDATE="apt update"
      ;;
    almalinux|rocky|centos|rhel|ol|fedora)
      PKG_MGR="dnf"
      PKG_INSTALL="dnf install -y"
      PKG_UPDATE="dnf update -y"
      ;;
    amzn)
      PKG_MGR="yum"
      PKG_INSTALL="yum install -y"
      PKG_UPDATE="yum update -y"
      ;;
    *)
      error "Unsupported distribution: $ID"
      exit 1
      ;;
  esac
}
# Install the benchmarking/inspection tools used by this script.
# NOTE: `ss` is not an apt package name — it ships in `iproute2`. Listing
# `ss` directly makes `apt install` fail, which aborts the whole script
# under `set -e`.
install_tools() {
  log "Updating package repositories..."
  $PKG_UPDATE
  log "Installing network performance tools..."
  case "$PKG_MGR" in
    apt)
      $PKG_INSTALL iperf3 iproute2 netstat-nat ethtool procps net-tools
      ;;
    dnf|yum)
      $PKG_INSTALL iperf3 net-tools ethtool procps-ng
      ;;
  esac
}
# Print the current kernel network configuration and per-interface
# error/drop counters, giving the user a before-tuning baseline.
show_baseline() {
  log "Current network configuration:"
  echo "TCP Congestion Control: $(cat /proc/sys/net/ipv4/tcp_congestion_control 2>/dev/null || echo 'unknown')"
  echo "Max receive buffer: $(cat /proc/sys/net/core/rmem_max 2>/dev/null || echo 'unknown')"
  echo "Max send buffer: $(cat /proc/sys/net/core/wmem_max 2>/dev/null || echo 'unknown')"
  echo ""
  log "Available congestion control algorithms:"
  cat /proc/sys/net/ipv4/tcp_available_congestion_control 2>/dev/null || echo "unknown"
  echo ""
  log "Network interface statistics (errors/drops):"
  if command -v ethtool >/dev/null 2>&1; then
    # Iterate with a glob instead of parsing `ls` output (robust to odd
    # names and to /sys/class/net being absent).
    local ifpath iface
    for ifpath in /sys/class/net/*; do
      [ -e "$ifpath" ] || continue   # unmatched glob stays literal; skip it
      iface=${ifpath##*/}
      [ "$iface" = "lo" ] && continue  # loopback stats are not interesting
      echo "Interface: $iface"
      ethtool -S "$iface" 2>/dev/null | grep -E '(drop|error|fifo)' | head -5 || echo " No statistics available"
    done
  fi
}
# Load the tcp_bbr kernel module now and register it for loading at boot.
enable_bbr() {
  log "Enabling TCP BBR congestion control..."

  # Load immediately; on newer kernels BBR may already be built in.
  modprobe tcp_bbr 2>/dev/null || warn "Could not load tcp_bbr module (may already be built-in)"

  # Back up any existing modules file before touching it (for rollback).
  if [ -f "$MODULES_FILE" ]; then
    cp "$MODULES_FILE" "${MODULES_FILE}.backup"
  fi

  # Persist the module across reboots; idempotent thanks to the grep guard.
  mkdir -p "$(dirname "$MODULES_FILE")"
  grep -q "tcp_bbr" "$MODULES_FILE" 2>/dev/null || echo "tcp_bbr" >> "$MODULES_FILE"
  chmod 644 "$MODULES_FILE"
}
# Write the sysctl tuning profile and apply it immediately.
# NOTE: `net.ipv4.tcp_init_cwnd` is NOT a real sysctl key — the initial
# congestion window is a per-route setting (`ip route change ... initcwnd 10`).
# An unknown key makes `sysctl -p` return non-zero, which under
# `set -euo pipefail` would abort the run and trigger the rollback trap,
# so that key has been removed from the generated profile.
apply_sysctl_config() {
  log "Applying network performance optimizations..."

  # Back up the existing profile so the ERR trap can restore it on failure.
  if [ -f "$SYSCTL_FILE" ]; then
    cp "$SYSCTL_FILE" "${SYSCTL_FILE}.backup"
  fi

  mkdir -p "$(dirname "$SYSCTL_FILE")"
  cat > "$SYSCTL_FILE" << 'EOF'
# Network Performance Optimizations
# Generated by network performance tuning script

# TCP BBR congestion control
net.core.default_qdisc = fq
net.ipv4.tcp_congestion_control = bbr
net.ipv4.tcp_slow_start_after_idle = 0

# TCP window scaling and advanced features
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_sack = 1

# TCP keepalive settings
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 60
net.ipv4.tcp_keepalive_probes = 3

# Socket buffer sizes (16MB max)
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.rmem_default = 262144
net.core.wmem_default = 262144

# TCP socket buffer sizes (min, default, max)
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

# Network device queue settings
net.core.netdev_max_backlog = 5000
net.core.netdev_budget = 600

# TCP connection queue sizes
net.core.somaxconn = 1024
net.ipv4.tcp_max_syn_backlog = 2048

# TCP Fast Open and connection optimization
net.ipv4.tcp_fastopen = 3
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 10

# TCP memory settings
net.ipv4.tcp_mem = 786432 1048576 16777216
net.ipv4.tcp_max_tw_buckets = 360000

# Path MTU probing and high-performance extras
net.ipv4.tcp_mtu_probing = 1
net.ipv4.tcp_base_mss = 1024
EOF
  chmod 644 "$SYSCTL_FILE"

  # Apply the new configuration now.
  sysctl -p "$SYSCTL_FILE"
}
# Verify the key optimizations took effect. Warns rather than failing,
# since some settings only take full effect after a reboot.
verify_config() {
  log "Verifying network optimizations..."
  local errors=0
  # Declare-then-assign so a failing command substitution isn't masked by
  # `local`'s own (always zero) exit status.
  local current_cc current_qdisc rmem_max

  # Check BBR
  current_cc=$(cat /proc/sys/net/ipv4/tcp_congestion_control 2>/dev/null || echo unknown)
  if [[ "$current_cc" == "bbr" ]]; then
    log "✓ TCP BBR congestion control enabled"
  else
    warn "✗ TCP BBR not active, current: $current_cc"
    # NB: `((errors++))` returns status 1 when errors is 0, which would
    # abort the script under `set -e`; use an arithmetic assignment instead.
    errors=$((errors + 1))
  fi

  # Check buffer sizes
  rmem_max=$(cat /proc/sys/net/core/rmem_max 2>/dev/null || echo 0)
  if [[ "$rmem_max" -ge 16777216 ]]; then
    log "✓ Receive buffer size optimized (${rmem_max})"
  else
    warn "✗ Receive buffer size not optimal: ${rmem_max}"
    errors=$((errors + 1))
  fi

  # Check queue discipline
  current_qdisc=$(cat /proc/sys/net/core/default_qdisc 2>/dev/null || echo unknown)
  if [[ "$current_qdisc" == "fq" ]]; then
    log "✓ Fair Queue (fq) qdisc enabled"
  else
    warn "✗ Fair Queue qdisc not active: $current_qdisc"
    errors=$((errors + 1))
  fi

  if [[ $errors -eq 0 ]]; then
    log "All network optimizations verified successfully!"
  else
    warn "Some optimizations may require a reboot to take full effect"
  fi
}
# Print post-install hints: how to benchmark and how to monitor the stack.
show_testing_instructions() {
  log "Network optimization complete!"
  echo ""
  log "To test performance improvements:"
  printf '%s\n' \
    "1. Server side: iperf3 -s -p 5201" \
    "2. Client side: iperf3 -c YOUR_SERVER_IP -p 5201 -t 30 -P 4" \
    ""
  log "Monitor network stats with:"
  printf '%s\n' \
    "- ss -i | head -20" \
    "- cat /proc/net/sockstat" \
    "- ethtool -S INTERFACE_NAME" \
    ""
  warn "Consider rebooting to ensure all optimizations are active"
}
# Main execution
# Orchestrates the full run: prerequisite checks, tool installation,
# baseline report, then (unless --baseline-only was given) BBR enablement,
# sysctl tuning, and verification.
main() {
  log "[1/6] Checking prerequisites..."
  check_root
  detect_distro
  log "[2/6] Installing network performance tools..."
  install_tools
  log "[3/6] Showing current network baseline..."
  show_baseline
  # --baseline-only: report the current settings and stop before changing anything.
  if [[ "$BASELINE_ONLY" == "true" ]]; then
    log "Baseline-only mode. Exiting without applying optimizations."
    exit 0
  fi
  log "[4/6] Enabling TCP BBR congestion control..."
  enable_bbr
  log "[5/6] Applying network performance optimizations..."
  apply_sysctl_config
  log "[6/6] Verifying configuration..."
  verify_config
  show_testing_instructions
}
main "$@"
Review the script before running. Execute with: bash install.sh