ebpf: improve xdp-cpu-redirect distribution in xdp_filter.c

The XDP CPU destination array/set, configured via xdp-cpu-redirect,
will always be fairly small.  My benchmarking with different traffic
patterns showed that the current modulo hashing into the CPU array can
easily result in a bad distribution, especially if the number of CPUs
is even.
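
To see the failure mode concretely, here is a toy userspace sketch
(not part of this patch; the addresses, the stride-2 source pattern
and the CPU count are made up, and mix32() is the Murmur3 finalizer
standing in for SuperFastHash): with structured source addresses and
4 CPUs, plain modulo over src + dst can only ever reach half of the
CPUs, while mixing the key first spreads the flows evenly.

#include <stdint.h>
#include <stdio.h>

/* Murmur3 32-bit finalizer, a stand-in mixer for SuperFastHash */
static uint32_t mix32(uint32_t h)
{
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

int main(void)
{
    enum { NCPU = 4, NFLOW = 1024 };
    int plain[NCPU] = { 0 }, hashed[NCPU] = { 0 };
    uint32_t dst = 0x0a000001; /* 10.0.0.1, fixed destination */

    for (uint32_t i = 0; i < NFLOW; i++) {
        uint32_t src = 0xc0a80000 + 2 * i; /* even host numbers only */
        uint32_t key = src + dst;          /* key is always odd here  */
        plain[key % NCPU]++;               /* old scheme: raw modulo  */
        hashed[mix32(key) % NCPU]++;       /* new scheme: hash first  */
    }
    for (int c = 0; c < NCPU; c++)
        printf("cpu%d: plain=%4d hashed=%4d\n", c, plain[c], hashed[c]);
    return 0;
}

With an even NCPU the raw modulo preserves the parity of the key, so
cpu0 and cpu2 never receive a flow; the hashed column comes out close
to uniform.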

This patch uses a proper hash function on the input key.  The key
used for hashing is inspired by the ippair hashing code in
src/tmqh-flow.c, and is based on the IP src + dst addresses.

An important property is that the hashing is flow symmetric, meaning
that if the source and destination get swapped, the selected CPU will
remain the same.  This is important for Suricata.
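
A minimal check of that property (illustrative only; mix32() again
stands in for SuperFastHash, and the addresses and CPU count are
hypothetical): because the key is the plain sum src + dst, and the
per-protocol seed is identical in both directions, any hash of the
key is automatically the same for both directions of a flow.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Murmur3 finalizer as a stand-in for SuperFastHash */
static uint32_t mix32(uint32_t h)
{
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

int main(void)
{
    const uint32_t ncpu = 6;   /* hypothetical CPU set size */
    uint32_t src = 0xc0a80101; /* 192.168.1.1 */
    uint32_t dst = 0x08080808; /* 8.8.8.8     */

    /* Addition commutes, so both directions of the flow produce
     * the same key and therefore select the same CPU. */
    uint32_t cpu_fwd = mix32(src + dst) % ncpu;
    uint32_t cpu_rev = mix32(dst + src) % ncpu;
    assert(cpu_fwd == cpu_rev);
    printf("both directions -> cpu %u\n", cpu_fwd);
    return 0;
}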

The hashing INITVAL (15485863, the 10^6th prime number) was chosen
fairly arbitrarily, but experiments with the kernel tree pktgen
scripts (pktgen_sample04_many_flows.sh and
pktgen_sample05_flow_per_thread.sh) showed that this improved the
distribution.

Signed-off-by: Jesper Dangaard Brouer <netoptimizer@brouer.com>

@@ -31,8 +31,13 @@
 #include <linux/udp.h>
 #include "bpf_helpers.h"
 
+#include "hash_func01.h"
+
 #define LINUX_VERSION_CODE 263682
 
+/* Hashing initval */
+#define INITVAL 15485863
+
 #define CPUMAP_MAX_CPUS 64
 
 struct vlan_hdr {
@@ -171,6 +176,7 @@ static int __always_inline filter_ipv4(void *data, __u64 nh_off, void *data_end)
     uint32_t key0 = 0;
     uint32_t *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
     uint32_t *cpu_selected;
+    uint32_t cpu_hash;
     int *iface_peer;
     int tx_port = 0;
 
@@ -219,8 +225,12 @@ static int __always_inline filter_ipv4(void *data, __u64 nh_off, void *data_end)
         }
     }
 
+    /* IP-pairs + protocol (UDP/TCP/ICMP) hit same CPU */
+    cpu_hash = tuple.src + tuple.dst;
+    cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);
+
     if (cpu_max && *cpu_max) {
-        cpu_dest = (tuple.src + tuple.dst) % *cpu_max;
+        cpu_dest = cpu_hash % *cpu_max;
         cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
         if (!cpu_selected)
             return XDP_ABORTED;
@@ -242,6 +252,7 @@ static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
     uint32_t key0 = 0;
     int *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
     uint32_t *cpu_selected;
+    uint32_t cpu_hash;
     int tx_port = 0;
     int *iface_peer;
 
@@ -281,8 +292,16 @@ static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
             return bpf_redirect_map(&tx_peer, tx_port, 0);
         }
     }
+
+    /* IP-pairs + protocol (UDP/TCP/ICMP) hit same CPU */
+    cpu_hash = tuple.src[0] + tuple.dst[0];
+    cpu_hash += tuple.src[1] + tuple.dst[1];
+    cpu_hash += tuple.src[2] + tuple.dst[2];
+    cpu_hash += tuple.src[3] + tuple.dst[3];
+    cpu_hash = SuperFastHash((char *)&cpu_hash, 4, ip6h->nexthdr);
+
     if (cpu_max && *cpu_max) {
-        cpu_dest = (tuple.src[0] + tuple.dst[0] + tuple.src[3] + tuple.dst[3]) % *cpu_max;
+        cpu_dest = cpu_hash % *cpu_max;
         cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
         if (!cpu_selected)
             return XDP_ABORTED;
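
For completeness, a standalone userspace sketch (not part of the
diff; mix32() is the Murmur3 finalizer standing in for SuperFastHash
from hash_func01.h, and the addresses and CPU count are made up)
showing that the IPv6 side keeps the same flow symmetry: all four
32-bit words of src and dst are folded into a single key word, so
swapping the two addresses cannot change the selected CPU.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Murmur3 finalizer as a stand-in for SuperFastHash */
static uint32_t mix32(uint32_t h)
{
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    return h;
}

/* Fold the four 32-bit words of both IPv6 addresses into one key,
 * mirroring the cpu_hash accumulation in filter_ipv6() above. */
static uint32_t ipv6_key(const uint32_t src[4], const uint32_t dst[4])
{
    uint32_t key = 0;
    for (int i = 0; i < 4; i++)
        key += src[i] + dst[i];
    return key;
}

int main(void)
{
    const uint32_t ncpu = 8; /* hypothetical CPU set size */
    uint32_t a[4] = { 0x20010db8, 0, 0, 0x00000001 };
    uint32_t b[4] = { 0x20010db8, 0, 0, 0x00000002 };

    assert(ipv6_key(a, b) == ipv6_key(b, a)); /* flow symmetric */
    printf("flow -> cpu %u\n", mix32(ipv6_key(a, b)) % ncpu);
    return 0;
}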
