af-packet: use per CPU hash in bypass

eBPF provides a per-CPU array data type: adding one element to the
array in fact adds it to every per-CPU copy in the kernel.
This allows a lockless structure in the kernel even when updating
counters.

In userspace, the flow bypass code is updated accordingly to fetch and
aggregate all per-CPU elements of these maps.
pull/3221/head
Eric Leblond 7 years ago
parent 8640cc5dcf
commit 31c947b4d8

@ -41,14 +41,14 @@ struct pair {
} __attribute__((__aligned__(8)));
struct bpf_map_def SEC("maps") flow_table_v4 = {
.type = BPF_MAP_TYPE_HASH,
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(struct flowv4_keys),
.value_size = sizeof(struct pair),
.max_entries = 32768,
};
struct bpf_map_def SEC("maps") flow_table_v6 = {
.type = BPF_MAP_TYPE_HASH,
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(struct flowv6_keys),
.value_size = sizeof(struct pair),
.max_entries = 32768,
@ -103,8 +103,8 @@ static __always_inline int ipv4_filter(struct __sk_buff *skb)
bpf_trace_printk(bfmt, sizeof(bfmt), tuple.src, sp, tuple.dst);
}
#endif
__sync_fetch_and_add(&value->packets, 1);
__sync_fetch_and_add(&value->bytes, skb->len);
value->packets++;
value->bytes += skb->len;
value->time = bpf_ktime_get_ns();
return 0;
}
@ -156,8 +156,8 @@ static __always_inline int ipv6_filter(struct __sk_buff *skb)
if (value) {
//char fmt[] = "Got a match IPv6: %u and %u\n";
//bpf_trace_printk(fmt, sizeof(fmt), tuple.port16[0], tuple.port16[1]);
__sync_fetch_and_add(&value->packets, 1);
__sync_fetch_and_add(&value->bytes, skb->len);
value->packets++;
value->bytes += skb->len;
value->time = bpf_ktime_get_ns();
return 0;
}

@ -42,15 +42,14 @@ typedef struct BypassedFlowManagerThreadData_ {
static int BypassedFlowV4Timeout(int fd, struct flowv4_keys *key, struct pair *value, void *data)
{
struct timespec *curtime = (struct timespec *)data;
SCLogDebug("Got curtime %" PRIu64 " and value %" PRIu64 " (sp:%d, dp:%d)",
SCLogDebug("Got curtime %" PRIu64 " and value %" PRIu64 " (sp:%d, dp:%d) %u",
curtime->tv_sec, value->time / 1000000000,
key->port16[0], key->port16[1]
key->port16[0], key->port16[1], key->ip_proto
);
if (curtime->tv_sec - value->time / 1000000000 > BYPASSED_FLOW_TIMEOUT) {
SCLogDebug("Got no packet for %d -> %d at %" PRIu64,
key->port16[0], key->port16[1], value->time);
EBPFDeleteKey(fd, key);
return 1;
}
return 0;

@ -37,12 +37,13 @@
#ifdef HAVE_PACKET_EBPF
#include "util-ebpf.h"
#include "util-cpu.h"
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "config.h"
#include "util-ebpf.h"
#define BPF_MAP_MAX_COUNT 16
struct bpf_map_item {
@ -180,32 +181,43 @@ int EBPFForEachFlowV4Table(const char *name,
{
int mapfd = EBPFGetMapFDByName(name);
struct flowv4_keys key = {}, next_key;
struct pair value = {0, 0, 0};
int ret, found = 0;
int found = 0;
unsigned int i;
unsigned int nr_cpus = UtilCpuGetNumProcessorsConfigured();
if (nr_cpus == 0) {
SCLogWarning(SC_ERR_INVALID_VALUE, "Unable to get CPU count");
return 0;
}
struct pair values_array[nr_cpus];
if (bpf_map_get_next_key(mapfd, &key, &next_key) != 0) {
return found;
}
while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
bpf_map_lookup_elem(mapfd, &key, &value);
ret = FlowCallback(mapfd, &key, &value, data);
if (ret) {
int iret = 1;
int pkts_cnt = 0;
int bytes_cnt = 0;
bpf_map_lookup_elem(mapfd, &key, values_array);
for (i = 0; i < nr_cpus; i++) {
int ret = FlowCallback(mapfd, &key, &values_array[i], data);
if (ret) {
pkts_cnt += values_array[i].packets;
bytes_cnt += values_array[i].bytes;
} else {
iret = 0;
break;
}
}
if (iret) {
flowstats->count++;
flowstats->packets += value.packets;
flowstats->bytes += value.bytes;
flowstats->packets += pkts_cnt;
flowstats->bytes += bytes_cnt;
found = 1;
EBPFDeleteKey(mapfd, &key);
}
key = next_key;
}
bpf_map_lookup_elem(mapfd, &key, &value);
ret = FlowCallback(mapfd, &key, &value, data);
if (ret) {
flowstats->count++;
flowstats->packets += value.packets;
flowstats->bytes += value.bytes;
found = 1;
}
return found;
}
@ -217,31 +229,43 @@ int EBPFForEachFlowV6Table(const char *name,
int mapfd = EBPFGetMapFDByName(name);
struct flowv6_keys key = {}, next_key;
struct pair value = {0, 0, 0};
int ret, found = 0;
int found = 0;
unsigned int i;
unsigned int nr_cpus = UtilCpuGetNumProcessorsConfigured();
if (nr_cpus == 0) {
SCLogWarning(SC_ERR_INVALID_VALUE, "Unable to get CPU count");
return 0;
}
struct pair values_array[nr_cpus];
if (bpf_map_get_next_key(mapfd, &key, &next_key) != 0) {
return found;
}
while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
bpf_map_lookup_elem(mapfd, &key, &value);
ret = FlowCallback(mapfd, &key, &value, data);
if (ret) {
int iret = 1;
int pkts_cnt = 0;
int bytes_cnt = 0;
bpf_map_lookup_elem(mapfd, &key, values_array);
for (i = 0; i < nr_cpus; i++) {
int ret = FlowCallback(mapfd, &key, &values_array[i], data);
if (ret) {
pkts_cnt += values_array[i].packets;
bytes_cnt += values_array[i].bytes;
} else {
iret = 0;
break;
}
}
if (iret) {
flowstats->count++;
flowstats->packets += value.packets;
flowstats->bytes += value.bytes;
flowstats->packets += pkts_cnt;
flowstats->bytes += bytes_cnt;
found = 1;
EBPFDeleteKey(mapfd, &key);
}
key = next_key;
}
bpf_map_lookup_elem(mapfd, &key, &value);
ret = FlowCallback(mapfd, &key, &value, data);
if (ret) {
flowstats->count++;
flowstats->packets += value.packets;
flowstats->bytes += value.bytes;
found = 1;
}
return found;
}

Loading…
Cancel
Save