
Configuration Examples

This page provides complete, real-world configuration examples for common Mermin deployment scenarios.

Production-Ready Configuration

Optimized for reliability, security, and comprehensive observability in production environments.

# Production configuration for Mermin
log_level = "info"
shutdown_timeout = "30s"

# Defaults suit typical production workloads (1K-5K flows/sec);
# the values below are tuned up for busier nodes.
pipeline {
  ebpf_max_flows = 500000                  # For high-traffic ingress (>10K flows/sec)
  ebpf_ringbuf_worker_capacity = 2048      # Per-worker ring buffer (default)
  flow_producer_store_capacity = 32768     # Initial flow store size
  worker_count = 8                         # For very busy nodes
  k8s_decorator_threads = 12               # For very large clusters
  flow_producer_channel_capacity = 16384
  k8s_decorator_channel_capacity = 32768
}

# API for health checks (required for liveness/readiness probes)
api {
  enabled = true
  listen_address = "0.0.0.0"
  port = 8080
}

# Metrics for Prometheus scraping
metrics {
  enabled = true
  listen_address = "0.0.0.0"
  port = 10250
}

# Standard tunnel detection
parser {
  geneve_port = 6081
  vxlan_port = 4789
  wireguard_port = 51820
}

# Monitor physical and CNI interfaces
discovery "instrument" {
  interfaces = ["eth*", "ens*", "cni*"]
}

# Full Kubernetes metadata enrichment
discovery "informer" "k8s" {
  informers_sync_timeout = "60s"

  selectors = [
    # Core resources
    { kind = "Pod" },
    { kind = "Service" },
    { kind = "Endpoint" },
    { kind = "EndpointSlice" },
    { kind = "Node" },

    # Workload controllers
    { kind = "Deployment" },
    { kind = "ReplicaSet" },
    { kind = "StatefulSet" },
    { kind = "DaemonSet" },
    { kind = "Job" },
    { kind = "CronJob" },

    # Networking
    { kind = "NetworkPolicy" },
    { kind = "Ingress" }
  ]
}

# Walk owner references for workload attribution
discovery "owners" {
  max_depth = 10
  include_kinds = [
    "Deployment",
    "StatefulSet",
    "DaemonSet",
    "ReplicaSet",
    "Job",
    "CronJob"
  ]
}

# Enable selector-based relations (NetworkPolicy, Service)
discovery "selector_relations" {}

# Extract comprehensive source metadata
attributes {
  source {
    extract {
      pod_labels = []          # All labels
      pod_annotations = []      # All annotations
      namespace_labels = []
    }

    association {
      pod = { enabled = true }
      service = { enabled = true }
      node = { enabled = true }
      endpoint = { enabled = true }
      endpointslice = { enabled = true }
      ingress = { enabled = true }
      networkpolicy = { enabled = true }
    }
  }

  destination {
    extract {
      pod_labels = []
      pod_annotations = []
      namespace_labels = []
    }

    association {
      pod = { enabled = true }
      service = { enabled = true }
      node = { enabled = true }
      endpoint = { enabled = true }
      endpointslice = { enabled = true }
      ingress = { enabled = true }
      networkpolicy = { enabled = true }
    }
  }
}

# Balanced flow timeouts
span {
  max_record_interval = "1m"
  generic_timeout = "2m"
  icmp_timeout = "30s"
  tcp_timeout = "5m"
  tcp_fin_timeout = "30s"
  tcp_rst_timeout = "15s"
  udp_timeout = "1m"
  community_id_seed = 0
}

# Secure OTLP export with TLS and authentication
export "traces" {
  otlp = {
    endpoint = "otel-collector.observability.svc.cluster.local:4317"
    protocol = "grpc"
    timeout = "30s"

    # Batching for efficiency
    max_batch_size = 1024
    max_batch_interval = "10s"
    max_queue_size = 4096
    max_concurrent_exports = 4
    max_export_timeout = "1m"

    # TLS with CA verification
    tls = {
      insecure = false
      ca_file = "/etc/mermin/certs/ca.crt"
    }

    # Basic authentication
    auth = {
      basic = {
        username = "${OTLP_USERNAME}"
        password = "${OTLP_PASSWORD}"
      }
    }
  }
}

Development/Testing Configuration

Simplified configuration for local development and testing with stdout export.
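
A minimal sketch of what this can look like, reusing the blocks from the production example above. The stdout exporter block name is an assumption; check the export reference for the exact key.

# Development configuration for Mermin (sketch)
log_level = "debug"

api {
  enabled = true
  listen_address = "127.0.0.1"
  port = 8080
}

metrics {
  enabled = true
  listen_address = "127.0.0.1"
  port = 10250
}

discovery "instrument" {
  interfaces = ["eth*", "veth*"]
}

discovery "informer" "k8s" {
  selectors = [
    { kind = "Pod" },
    { kind = "Service" },
    { kind = "Node" }
  ]
}

# Assumption: stdout export is selected with a `stdout` block in place of `otlp`;
# confirm the exact key in the export reference.
export "traces" {
  stdout = {}
}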

Cilium CNI Configuration

Optimized for Kubernetes clusters using Cilium CNI.
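
The patterns below are a sketch based on Cilium's usual interface naming (lxc* for the host side of pod veth pairs, cilium_* for host and tunnel devices); verify them against your nodes with ip link, since naming can vary by Cilium version and tunnel mode.

# Cilium CNI: pod veth ends are named lxc*, host/tunnel devices cilium_*
discovery "instrument" {
  interfaces = ["eth*", "ens*", "lxc*", "cilium_*"]
}

# Cilium tunnels over Geneve or VXLAN depending on its tunnel-protocol setting
parser {
  geneve_port = 6081
  vxlan_port = 4789
}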

Calico CNI Configuration

Optimized for Kubernetes clusters using Calico CNI.
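
The patterns below are a sketch based on Calico's usual interface naming (cali* for pod veth ends, tunl0 for IP-in-IP, vxlan.calico for VXLAN); adjust them to match your Calico data plane and encryption settings.

# Calico CNI: pod veth ends are named cali*, tunnels tunl0 / vxlan.calico
discovery "instrument" {
  interfaces = ["eth*", "ens*", "cali*", "tunl*", "vxlan.calico"]
}

# VXLAN overlay and optional WireGuard node-to-node encryption
parser {
  vxlan_port = 4789
  wireguard_port = 51820
}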

High-Throughput Configuration

Optimized for extreme-scale environments (>10 Gbps links, edge/CDN deployments with >25K flows/sec).
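
A sketch of the knobs that typically matter at this scale, using the same pipeline, span, and export settings as the production example. The values are illustrative starting points, not tested recommendations; size them against each node's memory and CPU.

# Larger eBPF and pipeline buffers for sustained >25K flows/sec
pipeline {
  ebpf_max_flows = 1000000
  ebpf_ringbuf_worker_capacity = 8192
  flow_producer_store_capacity = 131072
  worker_count = 16
  k8s_decorator_threads = 16
  flow_producer_channel_capacity = 65536
  k8s_decorator_channel_capacity = 131072
}

# Shorter timeouts evict idle flows sooner and keep the flow store smaller
span {
  max_record_interval = "30s"
  generic_timeout = "1m"
  udp_timeout = "30s"
  tcp_timeout = "2m"
}

# Larger export batches reduce per-request overhead
export "traces" {
  otlp = {
    endpoint = "otel-collector.observability.svc.cluster.local:4317"
    protocol = "grpc"
    max_batch_size = 4096
    max_batch_interval = "5s"
    max_queue_size = 16384
    max_concurrent_exports = 8
  }
}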

Security-Hardened Configuration

Focused on secure export and minimal attack surface.
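
A sketch of the hardening levers visible in the production example: watch fewer resource kinds, extract only the labels you need (assuming a non-empty list restricts extraction to the named labels, as the "All labels" comment above suggests), and export over verified TLS with credentials injected via environment variables.

# Watch only the resources you need; this also narrows the RBAC surface
discovery "informer" "k8s" {
  selectors = [
    { kind = "Pod" },
    { kind = "Service" },
    { kind = "Node" }
  ]
}

# Extract only required labels instead of everything
attributes {
  source {
    extract {
      pod_labels = ["app.kubernetes.io/name", "app.kubernetes.io/instance"]
    }
  }
  destination {
    extract {
      pod_labels = ["app.kubernetes.io/name", "app.kubernetes.io/instance"]
    }
  }
}

# TLS with CA verification; credentials come from environment variables
export "traces" {
  otlp = {
    endpoint = "otel-collector.observability.svc.cluster.local:4317"
    protocol = "grpc"
    tls = {
      insecure = false
      ca_file = "/etc/mermin/certs/ca.crt"
    }
    auth = {
      basic = {
        username = "${OTLP_USERNAME}"
        password = "${OTLP_PASSWORD}"
      }
    }
  }
}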

Multi-Backend OTLP Configuration

Export to multiple observability backends simultaneously.
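
A common way to achieve this is to export once from Mermin to an OpenTelemetry Collector and let the Collector fan out to each backend. The sketch below assumes that layout, with the fan-out handled by the Collector configuration shown next.

# Send all flow spans to a local OpenTelemetry Collector, which fans out
export "traces" {
  otlp = {
    endpoint = "otel-collector.observability.svc.cluster.local:4317"
    protocol = "grpc"
    timeout = "30s"
  }
}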

OpenTelemetry Collector Configuration for Multi-Backend:
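
A sketch of a Collector pipeline that receives OTLP from Mermin and exports to two backends; the Tempo and vendor endpoints are placeholders for your own destinations.

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317

exporters:
  # Placeholder backends: replace with your own endpoints
  otlp/tempo:
    endpoint: tempo.observability.svc.cluster.local:4317
    tls:
      insecure: true
  otlphttp/vendor:
    endpoint: https://otlp.example.com:4318

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [otlp/tempo, otlphttp/vendor]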

Cloud Platform Configurations

GKE (Google Kubernetes Engine)
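
A sketch of interface discovery on GKE. With Dataplane V2, GKE runs a Cilium-based data plane, so the cilium_*/lxc* patterns apply; standard-dataplane nodes name their pod veth interfaces differently, so confirm with ip link before settling on patterns.

# GKE with Dataplane V2 (Cilium-based data plane)
discovery "instrument" {
  interfaces = ["eth*", "ens*", "lxc*", "cilium_*"]
}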

EKS (Amazon Elastic Kubernetes Service)
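
A sketch for EKS with the AWS VPC CNI, where node ENIs appear as eth*/ens* and each pod gets a host-side interface named eni followed by a hash.

# EKS with the AWS VPC CNI: node ENIs plus eni* pod interfaces
discovery "instrument" {
  interfaces = ["eth*", "ens*", "eni*"]
}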

AKS (Azure Kubernetes Service)
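
A sketch for AKS. With Azure CNI the host side of each pod's veth pair is typically named azv*; kubenet clusters use plain veth pairs instead, so adjust the patterns to your network plugin.

# AKS with Azure CNI: azv* pod interfaces; kubenet clusters use veth*
discovery "instrument" {
  interfaces = ["eth*", "azv*", "veth*"]
}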

