Configuration Examples

This page provides complete, real-world configuration examples for common Mermin deployment scenarios.

Production-Ready Configuration

Optimized for reliability, security, and comprehensive observability in production environments.

# Production configuration for Mermin
# Use "info" in production; raise to "debug" only while troubleshooting,
# since debug output is very verbose at production flow rates.
log_level = "info"
# Grace period for flushing in-flight flow spans on SIGTERM before exit.
# Keep this below the pod's terminationGracePeriodSeconds so the exporter
# has time to drain its queue before the kubelet sends SIGKILL.
shutdown_timeout = "30s"

# Defaults are optimized for typical production workloads (1K-5K flows/sec)
# The overrides below are sized for busier nodes. Size each stage's buffers
# together: a queue that is much smaller than the one feeding it becomes the
# drop point under burst load.
pipeline {
  flow_capture {
    flow_stats_capacity = 500000        # For high-traffic ingress (>10K flows/sec)
    flow_events_capacity = 1024         # Ring buffer capacity (entries)
  }
  flow_producer {
    workers = 8                          # For very busy nodes
    worker_queue_capacity = 2048         # Default buffer per worker
    flow_store_poll_interval = "5s"      # Polling interval
    flow_span_queue_capacity = 16384     # Buffer to K8s decorator
  }
  k8s_decorator {
    threads = 12                         # For very large clusters
    decorated_span_queue_capacity = 32768  # Buffer to exporter
  }
}

# HTTP server for health checks (required for liveness/readiness probes)
# Bind to 0.0.0.0 so the kubelet can reach the probe endpoints from
# outside the container's loopback interface.
internal "server" {
  enabled = true
  listen_address = "0.0.0.0"
  port = 8080
}

# Metrics for Prometheus scraping
# NOTE(review): 10250 is also the kubelet's default port; if Mermin runs
# with hostNetwork this will collide — confirm or choose another port.
internal "metrics" {
  enabled = true
  listen_address = "0.0.0.0"
  port = 10250
}

# Standard tunnel detection
# These are the IANA-registered default ports for each encapsulation;
# change them only if your CNI or VPN is configured on non-standard ports.
parser {
  geneve_port = 6081
  vxlan_port = 4789
  wireguard_port = 51820
}

# Monitor physical and CNI interfaces
# Glob patterns: eth*/ens* cover common physical/virtio NIC names, cni*
# covers CNI-created bridge/veth interfaces. Adjust to your node's naming.
discovery "instrument" {
  interfaces = ["eth*", "ens*", "cni*"]
}

# Full Kubernetes metadata enrichment
discovery "informer" "k8s" {
  # Fail startup if the watch caches have not synced within this window;
  # large clusters may need longer.
  informers_sync_timeout = "60s"

  # Resource kinds to watch. Each watched kind adds API-server watch load
  # and informer cache memory, so trim this list on very large clusters.
  selectors = [
    # Core resources
    { kind = "Pod" },
    { kind = "Service" },
    # NOTE(review): the core Kubernetes kind is "Endpoints" (plural) —
    # confirm "Endpoint" matches Mermin's selector naming.
    { kind = "Endpoint" },
    { kind = "EndpointSlice" },
    { kind = "Node" },

    # Workload controllers
    { kind = "Deployment" },
    { kind = "ReplicaSet" },
    { kind = "StatefulSet" },
    { kind = "DaemonSet" },
    { kind = "Job" },
    { kind = "CronJob" },

    # Networking
    { kind = "NetworkPolicy" },
    { kind = "Ingress" }
  ]

  # Walk owner references for workload attribution
  # (e.g. Pod -> ReplicaSet -> Deployment). max_depth bounds the walk;
  # include_kinds limits which owners are recorded as attributes.
  owner_relations = {
    max_depth = 10
    include_kinds = [
      "Deployment",
      "StatefulSet",
      "DaemonSet",
      "ReplicaSet",
      "Job",
      "CronJob"
    ]
  }

  # Enable selector-based relations (NetworkPolicy, Service)
  # Field paths point at the label selectors each kind uses to target Pods.
  selector_relations = [
    { kind = "NetworkPolicy", to = "Pod", selector_match_labels_field = "spec.podSelector.matchLabels", selector_match_expressions_field = "spec.podSelector.matchExpressions" },
    { kind = "Service", to = "Pod", selector_match_labels_field = "spec.selector" }
  ]
}

# Extract comprehensive source metadata
# For each resource kind matched below, the listed metadata fields are
# attached to the flow span's source side.
attributes "source" "k8s" {
  extract {
    # "[*]" applies the field path to every associated resource kind.
    metadata = [
      "[*].metadata.namespace",
      "[*].metadata.name",
      "[*].metadata.uid"
    ]
  }

  # Association rules: map flow tuple fields (source.ip/port, transport)
  # onto Kubernetes object fields to find the owning resource.
  association {
    # Pods: match on pod and host IPs, container and host ports.
    pod = {
      sources = [
        { from = "source.ip", to = ["status.podIP", "status.podIPs[*]", "status.hostIP", "status.hostIPs[*]"] },
        { from = "source.port", to = ["spec.containers[*].ports[*].containerPort", "spec.containers[*].ports[*].hostPort"] },
        { from = "network.transport", to = ["spec.containers[*].ports[*].protocol"] }
      ]
    }
    # Services: cluster, external, and load-balancer addresses.
    service = {
      sources = [
        { from = "source.ip", to = ["spec.clusterIP", "spec.clusterIPs[*]", "spec.externalIPs[*]", "spec.loadBalancerIP", "spec.externalName"] },
        { from = "source.port", to = ["spec.ports[*].port"] },
        { from = "network.transport", to = ["spec.ports[*].protocol"] },
        { from = "network.type", to = ["spec.ipFamilies[*]"] }
      ]
    }
    # Nodes: any advertised address (InternalIP, ExternalIP, ...).
    node = {
      sources = [
        { from = "source.ip", to = ["status.addresses[*].address"] }
      ]
    }
    # Legacy Endpoints objects.
    endpoint = {
      sources = [
        { from = "source.ip", to = ["subsets[*].addresses[*].ip"] },
        { from = "source.port", to = ["subsets[*].ports[*].port"] },
        { from = "network.transport", to = ["subsets[*].ports[*].protocol"] }
      ]
    }
    # EndpointSlice: addressType (IPv4/IPv6) also informs network.type.
    endpointslice = {
      sources = [
        { from = "source.ip", to = ["endpoints[*].addresses[*]"] },
        { from = "source.port", to = ["ports[*].port"] },
        { from = "network.transport", to = ["ports[*].protocol"] },
        { from = "network.type", to = ["addressType"] }
      ]
    }
    # Ingress: load-balancer status addresses and backend service ports.
    ingress = {
      sources = [
        { from = "source.ip", to = ["status.loadBalancer.ingress[*].ip", "status.loadBalancer.ingress[*].hostname"] },
        { from = "source.port", to = ["spec.defaultBackend.service.port", "spec.rules[*].http.paths[*].backend.service.port.number"] }
      ]
    }
    # NetworkPolicy: CIDR blocks and ports from ingress/egress rules.
    networkpolicy = {
      sources = [
        { from = "source.ip", to = ["spec.ingress[*].from[*].ipBlock.cidr", "spec.egress[*].to[*].ipBlock.cidr"] },
        { from = "source.port", to = ["spec.ingress[*].ports[*].port", "spec.egress[*].ports[*].port"] },
        { from = "network.transport", to = ["spec.ingress[*].ports[*].protocol", "spec.egress[*].ports[*].protocol"] }
      ]
    }
  }
}

# Extract comprehensive destination metadata
# Mirror of the "source" "k8s" block: identical extract fields and
# association rules, matched on the destination side of the flow.
attributes "destination" "k8s" {
  extract {
    # Same field set and order as the source block so both sides of a
    # flow carry consistent metadata attributes (the original listed
    # "pod.metadata.uid", which captured UIDs for Pods only).
    metadata = [
      "[*].metadata.namespace",
      "[*].metadata.name",
      "[*].metadata.uid"
    ]
  }

  association {
    # Pods: match on pod and host IPs, container and host ports.
    pod = {
      sources = [
        { from = "destination.ip", to = ["status.podIP", "status.podIPs[*]", "status.hostIP", "status.hostIPs[*]"] },
        { from = "destination.port", to = ["spec.containers[*].ports[*].containerPort", "spec.containers[*].ports[*].hostPort"] },
        { from = "network.transport", to = ["spec.containers[*].ports[*].protocol"] }
      ]
    }
    # Services: cluster, external, and load-balancer addresses.
    service = {
      sources = [
        { from = "destination.ip", to = ["spec.clusterIP", "spec.clusterIPs[*]", "spec.externalIPs[*]", "spec.loadBalancerIP", "spec.externalName"] },
        { from = "destination.port", to = ["spec.ports[*].port"] },
        { from = "network.transport", to = ["spec.ports[*].protocol"] },
        { from = "network.type", to = ["spec.ipFamilies[*]"] }
      ]
    }
    # Nodes: any advertised address (InternalIP, ExternalIP, ...).
    node = {
      sources = [
        { from = "destination.ip", to = ["status.addresses[*].address"] }
      ]
    }
    # Legacy Endpoints objects.
    endpoint = {
      sources = [
        { from = "destination.ip", to = ["subsets[*].addresses[*].ip"] },
        { from = "destination.port", to = ["subsets[*].ports[*].port"] },
        { from = "network.transport", to = ["subsets[*].ports[*].protocol"] }
      ]
    }
    endpointslice = {
      sources = [
        { from = "destination.ip", to = ["endpoints[*].addresses[*]"] },
        { from = "destination.port", to = ["ports[*].port"] },
        { from = "network.transport", to = ["ports[*].protocol"] },
        # Added for parity with the source block: map the slice's
        # addressType (IPv4/IPv6) onto network.type.
        { from = "network.type", to = ["addressType"] }
      ]
    }
    # Ingress: load-balancer status addresses and backend service ports.
    ingress = {
      sources = [
        { from = "destination.ip", to = ["status.loadBalancer.ingress[*].ip", "status.loadBalancer.ingress[*].hostname"] },
        { from = "destination.port", to = ["spec.defaultBackend.service.port", "spec.rules[*].http.paths[*].backend.service.port.number"] }
      ]
    }
    # NetworkPolicy: CIDR blocks and ports from ingress/egress rules.
    networkpolicy = {
      sources = [
        { from = "destination.ip", to = ["spec.ingress[*].from[*].ipBlock.cidr", "spec.egress[*].to[*].ipBlock.cidr"] },
        { from = "destination.port", to = ["spec.ingress[*].ports[*].port", "spec.egress[*].ports[*].port"] },
        { from = "network.transport", to = ["spec.ingress[*].ports[*].protocol", "spec.egress[*].ports[*].protocol"] }
      ]
    }
  }
}

# Balanced flow timeouts
# Timeouts control how long an idle flow is kept before its span is
# emitted; shorter values free memory faster, longer values merge more
# traffic into a single span.
span {
  max_record_interval = "1m"   # Emit a span at least this often for long-lived flows
  generic_timeout = "2m"       # Fallback for protocols without a specific timeout
  icmp_timeout = "30s"
  tcp_timeout = "5m"           # Idle established TCP connections
  tcp_fin_timeout = "30s"      # After an observed FIN
  tcp_rst_timeout = "15s"      # After an observed RST
  udp_timeout = "1m"
  community_id_seed = 0        # 0 is the Community ID spec default; must match other tools correlating on community_id
}

# Secure OTLP export with TLS and authentication
export "traces" {
  otlp = {
    # In-cluster collector address; 4317 is the standard OTLP/gRPC port.
    endpoint = "otel-collector.observability.svc.cluster.local:4317"
    protocol = "grpc"
    timeout = "30s"

    # Batching for efficiency
    # A batch is sent when it reaches max_batch_size spans or
    # max_batch_interval elapses, whichever comes first; max_queue_size
    # bounds spans buffered ahead of export.
    max_batch_size = 1024
    max_batch_interval = "10s"
    max_queue_size = 4096
    max_concurrent_exports = 4
    max_export_timeout = "1m"

    # TLS with CA verification
    # Server certificate is verified against the mounted CA bundle;
    # never set insecure_skip_verify = true in production.
    tls = {
      insecure_skip_verify = false
      ca_cert = "/etc/mermin/certs/ca.crt"
    }

    # Basic authentication
    # NOTE(review): assumes ${VAR} is expanded from environment variables
    # at config load — confirm Mermin's substitution syntax.
    auth = {
      basic = {
        user = "${OTLP_USERNAME}"
        pass = "${OTLP_PASSWORD}"
      }
    }
  }
}

Development/Testing Configuration

Simplified configuration for local development and testing with stdout export.

Cilium CNI Configuration

Optimized for Kubernetes clusters using Cilium CNI.

Calico CNI Configuration

Optimized for Kubernetes clusters using Calico CNI.

High-Throughput Configuration

Optimized for extreme scale environments (>10 Gbps, edge/CDN deployments with >25K flows/sec).

Security-Hardened Configuration

Focused on secure export and minimal attack surface.

Multi-Backend OTLP Configuration

Note: Mermin currently supports one OTLP endpoint per instance. For multi-backend export, use an OpenTelemetry Collector as an intermediary:

Mermin → OTel Collector → Multiple Backends

See Observability Backends for collector configuration.

Export to multiple observability backends simultaneously.

OpenTelemetry Collector Configuration for Multi-Backend:

Cloud Platform Configurations

GKE (Google Kubernetes Engine)

EKS (Amazon Elastic Kubernetes Service)

AKS (Azure Kubernetes Service)

Next Steps

  1. Deploy to Kubernetes: Use these configurations in production

  2. Review Deployment Options: Choose the right deployment method

Need Help?

Last updated