github-ci: add namespace based IPS tests

Implementing a bridge test with af-packet IPS and a routing test
with iptables + nfqueue.

Very helpful explanation and guidance on network namespaces can be
found here: https://www.redhat.com/en/blog/net-namespaces

Sets up 3 network namespaces:
1. client - for running client tools like ping, curl, wget
2. server - for running a server, currently only Caddy
3. dut - for running Suricata, this namespace connects the client and
   server namespaces

Validate IPS operations in 3 ways:
1. check return codes of the client tools
2. check Suricata's IPS stats
3. use tshark to validate expected drops

Run Suricata in AF_PACKET IPS mode in both the autofp and workers
runmodes. Do the same for NFQUEUE.

Tshark's JSON output is used with JQ to validate that pings are dropped.

All tests are codecov enabled.
pull/14721/head
Victor Julien 3 months ago
parent 2cf9a327d5
commit 49783fa9f7

@ -1,7 +1,7 @@
codecov:
require_ci_to_pass: false
notify:
after_n_builds: 5
after_n_builds: 6
coverage:
precision: 2

@ -1897,6 +1897,132 @@ jobs:
- run: |
./qa/unix.sh "suricata-verify/"
# Live IPS tests run inside network namespaces (client/server/dut). The
# test scripts and configs live under .github/workflows/netns/.
ubuntu-latest-namespace-ips:
name: Ubuntu 24.04 (afpacket IPS tests in namespaces)
runs-on: ubuntu-latest
needs: [prepare-deps, prepare-cbindgen]
container:
image: ubuntu:24.04
# privileged is needed so the job can create network namespaces, veth
# devices and iptables/NFQUEUE rules inside the container.
options: --privileged
steps:
- name: Cache ~/.cargo
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb
with:
path: ~/.cargo/registry
key: cargo-registry
- name: Determine number of CPUs
run: echo CPUS=$(nproc --all) >> $GITHUB_ENV
- name: Install dependencies
run: |
apt update
apt -y install \
libpcre2-dev \
build-essential \
autoconf \
automake \
llvm-19-dev \
clang-19 \
git \
hwloc \
libhwloc-dev \
jq \
inetutils-ping \
libc++-dev \
libc++abi-dev \
libtool \
libpcap-dev \
libnet1-dev \
libyaml-0-2 \
libyaml-dev \
libcap-ng-dev \
libcap-ng0 \
libmagic-dev \
libnetfilter-queue-dev \
libnetfilter-queue1 \
libnfnetlink-dev \
libnfnetlink0 \
libnuma-dev \
libhiredis-dev \
libjansson-dev \
libevent-dev \
libevent-pthreads-2.1-7 \
make \
parallel \
python3-yaml \
software-properties-common \
sudo \
zlib1g \
zlib1g-dev \
exuberant-ctags \
unzip \
curl \
time \
wget \
caddy \
ethtool \
iproute2 \
iptables \
tshark
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131
with:
name: prep
path: prep
# packaged Rust version is too old for coverage, so get from rustup. 1.85.1 matches
# LLVM 19
- name: Install Rust
run: curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.85.1 -y
- uses: ./.github/actions/install-cbindgen
- run: ./autogen.sh
# Build with clang source-based coverage instrumentation so each test run
# emits a .profraw file (see LLVM_PROFILE_FILE below).
- run: ./configure --disable-shared --localstatedir=/var --prefix=/usr --sysconfdir=/etc --enable-nfqueue
env:
CC: "clang-19"
CXX: "clang++-19"
RUSTFLAGS: "-C instrument-coverage"
CFLAGS: "-fprofile-instr-generate -fcoverage-mapping -O0"
CXXFLAGS: "-fprofile-instr-generate -fcoverage-mapping -O0"
- run: make -j ${{ env.CPUS }}
env:
CC: "clang-19"
CXX: "clang++-19"
RUSTFLAGS: "-C instrument-coverage"
CFLAGS: "-fprofile-instr-generate -fcoverage-mapping -O0"
CXXFLAGS: "-fprofile-instr-generate -fcoverage-mapping -O0"
# af-packet bridge IPS test, workers runmode.
- run: |
./.github/workflows/netns/afp-ips-netns-bridge.sh "2" "workers" ".github/workflows/netns/ips-netns.yaml"
env:
LLVM_PROFILE_FILE: "/tmp/afp-ips.profraw"
- run: llvm-profdata-19 merge -o afp-ips.profdata /tmp/afp-ips.profraw
# af-packet bridge IPS test, autofp runmode.
- run: |
./.github/workflows/netns/afp-ips-netns-bridge.sh "2" "autofp" ".github/workflows/netns/ips-netns.yaml"
env:
LLVM_PROFILE_FILE: "/tmp/afp-ips-autofp.profraw"
- run: llvm-profdata-19 merge -o afp-ips-autofp.profdata /tmp/afp-ips-autofp.profraw
# NFQUEUE routed IPS test, autofp runmode.
- run: |
./.github/workflows/netns/nfq-ips-netns-route.sh "autofp" ".github/workflows/netns/ips-netns.yaml"
env:
LLVM_PROFILE_FILE: "/tmp/nfq-ips.profraw"
- run: llvm-profdata-19 merge -o nfq-ips.profdata /tmp/nfq-ips.profraw
# NFQUEUE routed IPS test, workers runmode.
- run: |
./.github/workflows/netns/nfq-ips-netns-route.sh "workers" ".github/workflows/netns/ips-netns.yaml"
env:
LLVM_PROFILE_FILE: "/tmp/nfq-ips-workers.profraw"
- run: llvm-profdata-19 merge -o nfq-ips-workers.profdata /tmp/nfq-ips-workers.profraw
# Merge all four per-run profiles and generate the textual coverage
# report that Codecov consumes.
- run: llvm-profdata-19 merge -o combined.profdata afp-ips.profdata nfq-ips.profdata afp-ips-autofp.profdata nfq-ips-workers.profdata
- run: llvm-cov-19 show ./src/suricata -instr-profile=combined.profdata --show-instantiations --ignore-filename-regex="^/root/.*" > coverage.txt
- name: Upload coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de
with:
fail_ci_if_error: false
flags: netns
verbose: true
ubuntu-24-04-asan-afpdpdk:
name: Ubuntu 24.04 (afpacket and dpdk live tests with ASAN)
runs-on: ubuntu-latest

@ -0,0 +1,243 @@
#!/bin/bash
# Script to test live IPS capabilities for AF_PACKET.
#
# Uses 3 network namespaces:
# - client
# - server
# - dut
#
# Dut is where Suricata will run:
#
# [ client ]$clientif - $dutclientif[ dut ]$dutserverif - $serverif[ server ]
#
# By copying packets between the dut interfaces, Suricata becomes the bridge.
# Call with following arguments:
# 1st: "2" or "3" to indicate the tpacket version.
# 2nd: runmode string (single/autofp/workers)
# 3rd: suricata yaml to use
# Exit on first error; trace every command for easier CI log debugging.
set -e
set -x
if [ $# -ne "3" ]; then
echo "ERROR call with 3 args: tpacket version (2/3), runmode (single/autofp/workers) and yaml"
exit 1;
fi
# NOTE(review): TPACKET is assigned but never referenced below; presumably
# the tpacket version is controlled via the yaml -- confirm intent.
TPACKET=$1
RUNMODE=$2
YAML=$3
# dump some info
echo "* printing some diagnostics..."
ip netns list
uname -a
ip r
echo "* printing some diagnostics... done"
# Namespace and interface naming. Client and server share the same
# 10.10.10.0/24 subnet: the dut acts as a transparent bridge, not a router.
clientns=client
serverns=server
dutns=dut
clientip="10.10.10.10/24"
serverip='10.10.10.20/24'
clientif=client
serverif=server
dutclientif=dut_client
dutserverif=dut_server
# Clean up namespaces possibly left behind by a previous run.
echo "* removing old namespaces..."
NAMESPACES=$(ip netns list|cut -d' ' -f1)
for NS in $NAMESPACES; do
if [ $NS = $dutns ] || [ $NS = $clientns ] || [ $NS = $serverns ]; then
ip netns delete $NS
fi
done
echo "* removing old namespaces... done"
# remove eve.json from previous run
if [ -f eve.json ]; then
rm eve.json
fi
# Prefer the release build of suricatasc, fall back to the debug build.
if [ -e ./rust/target/release/suricatasc ]; then
SURICATASC=./rust/target/release/suricatasc
else
SURICATASC=./rust/target/debug/suricatasc
fi
# Overall result: flipped to 1 on any validation failure, returned at exit.
RES=0
# adding namespaces
echo "* creating namespaces..."
ip netns add $clientns
ip netns add $serverns
ip netns add $dutns
echo "* creating namespaces... done"
#diagnostics output
echo "* list namespaces..."
ip netns list
ip netns exec $clientns ip ad
ip netns exec $serverns ip ad
ip netns exec $dutns ip ad
echo "* list namespaces... done"
# create virtual ethernet link between client-dut and server-dut
# These are not yet mapped to a namespace
echo "* creating virtual ethernet devices..."
ip link add ptp-$clientif type veth peer name ptp-$dutclientif
ip link add ptp-$serverif type veth peer name ptp-$dutserverif
echo "* creating virtual ethernet devices...done"
echo "* list interface in global namespace..."
ip link
echo "* list interface in global namespace... done"
echo "* map virtual ethernet interfaces to their namespaces..."
ip link set ptp-$clientif netns $clientns
ip link set ptp-$serverif netns $serverns
ip link set ptp-$dutclientif netns $dutns
ip link set ptp-$dutserverif netns $dutns
echo "* map virtual ethernet interfaces to their namespaces... done"
echo "* list namespaces and interfaces within them..."
ip netns list
ip netns exec $clientns ip ad
ip netns exec $serverns ip ad
ip netns exec $dutns ip ad
echo "* list namespaces and interfaces within them... done"
# bring up interfaces. Client and server get IP's.
# Disable rx and tx csum offload on all sides.
echo "* setup client interface..."
ip netns exec $clientns ip addr add $clientip dev ptp-$clientif
ip netns exec $clientns ethtool -K ptp-$clientif rx off tx off
ip netns exec $clientns ip link set ptp-$clientif up
echo "* setup client interface... done"
echo "* setup server interface..."
ip netns exec $serverns ip addr add $serverip dev ptp-$serverif
ip netns exec $serverns ethtool -K ptp-$serverif rx off tx off
ip netns exec $serverns ip link set ptp-$serverif up
echo "* setup server interface... done"
# The dut interfaces get no IP address: Suricata copies packets between them.
echo "* setup dut interfaces..."
ip netns exec $dutns ethtool -K ptp-$dutclientif rx off tx off
ip netns exec $dutns ethtool -K ptp-$dutserverif rx off tx off
ip netns exec $dutns ip link set ptp-$dutclientif up
ip netns exec $dutns ip link set ptp-$dutserverif up
echo "* setup dut interfaces... done"
# set first rule file
cp .github/workflows/netns/drop-icmp.rules suricata.rules
RULES="suricata.rules"
echo "* starting Suricata in the \"dut\" namespace..."
# Start Suricata in the dut namespace, then SIGINT after 240 seconds. Will
# close it earlier through the unix socket.
timeout --kill-after=300 --preserve-status 240 \
ip netns exec $dutns \
./src/suricata -c $YAML -l ./ --af-packet -v \
--set default-rule-path=. --runmode=$RUNMODE -S $RULES &
SURIPID=$!
sleep 10
echo "* starting Suricata... done"
# Capture on the server side so we can later prove no pings got through.
echo "* starting tshark on in the server namespace..."
timeout --kill-after=240 --preserve-status 180 \
ip netns exec $serverns \
tshark -i ptp-$serverif -T json > tshark-server.json &
TSHARKSERVERPID=$!
sleep 5
echo "* starting tshark on in the server namespace... done, pid $TSHARKSERVERPID"
echo "* starting Caddy..."
# Start Caddy in the server namespace
timeout --kill-after=240 --preserve-status 120 \
ip netns exec $serverns \
caddy file-server --domain 10.10.10.20 --browse &
CADDYPID=$!
sleep 10
echo "* starting Caddy in the \"server\" namespace... done"
# HTTPS fetches must succeed: only ICMP echo requests are dropped by the rule.
echo "* running curl in the \"client\" namespace..."
ip netns exec $clientns \
curl -O https://10.10.10.20/index.html
echo "* running curl in the \"client\" namespace... done"
echo "* running wget in the \"client\" namespace..."
ip netns exec $clientns \
wget https://10.10.10.20/index.html
echo "* running wget in the \"client\" namespace... done"
# Strip the netmask to get a pingable address.
ping_ip=$(echo $serverip|cut -f1 -d'/')
echo "* running ping $ping_ip in the \"client\" namespace..."
# Temporarily allow failure: ping is expected to fail due to the drop rule.
set +e
ip netns exec $clientns \
ping -c 10 $ping_ip
PINGRES=$?
set -e
echo "* running ping in the \"client\" namespace... done"
# pings should have been dropped, so ping reports error
if [ $PINGRES != 1 ]; then
echo "ERROR ping should have failed"
RES=1
fi
# give stats time to get updated
sleep 10
echo "* shutting down tshark..."
kill -INT $TSHARKSERVERPID
wait $TSHARKSERVERPID
echo "* shutting down tshark... done"
# Pull the IPS and capture counters from the last stats record in eve.json.
ACCEPTED=$(jq -c 'select(.event_type == "stats")' ./eve.json | tail -n1 | jq '.stats.ips.accepted')
BLOCKED=$(jq -c 'select(.event_type == "stats")' ./eve.json | tail -n1 | jq '.stats.ips.blocked')
KERNEL_PACKETS=$(jq -c 'select(.event_type == "stats")' ./eve.json | tail -n1 | jq '.stats.capture.kernel_packets')
echo "ACCEPTED $ACCEPTED BLOCKED $BLOCKED KERNEL_PACKETS $KERNEL_PACKETS"
if [ $KERNEL_PACKETS -eq 0 ]; then
echo "ERROR no packets captured"
RES=1
fi
if [ $ACCEPTED -eq 0 ]; then
echo "ERROR should have seen non-0 accepted"
RES=1
fi
# 10 pings were sent; all 10 should be blocked by the drop-icmp rule.
if [ $BLOCKED -ne 10 ]; then
echo "ERROR should have seen 10 blocked"
RES=1
fi
# validate that we didn't receive pings
# icmp.type 8 is an ICMP echo request; none should reach the server side.
SERVER_RECV_PING=$(jq -c '.[]' ./tshark-server.json|jq 'select(._source.layers.icmp."icmp.type"=="8")'|wc -l)
echo "* server pings received check (should be 0): $SERVER_RECV_PING"
if [ $SERVER_RECV_PING -ne 0 ]; then
jq '.[]' ./tshark-server.json | jq 'select(._source.layers.icmp)'
RES=1
fi
echo "* server pings received check... done"
echo "* shutting down..."
kill -INT $CADDYPID
wait $CADDYPID
# Ask Suricata to shut down cleanly through the unix socket, then wait.
ip netns exec $dutns \
${SURICATASC} -c "shutdown" /var/run/suricata/suricata-command.socket
wait $SURIPID
echo "* shutting down... done"
echo "* dumping some stats..."
cat ./eve.json | jq -c 'select(.tls)'|tail -n1|jq
cat ./eve.json | jq -c 'select(.stats)|.stats.ips'|tail -n1|jq
cat ./eve.json | jq -c 'select(.stats)|.stats.capture'|tail -n1|jq
cat ./eve.json | jq
echo "* dumping some stats... done"
echo "* done: $RES"
exit $RES

@ -0,0 +1 @@
# Drop all ICMP echo requests (itype 8). Used by the netns IPS tests to
# verify that pings are blocked while other traffic passes.
drop icmp any any -> any any (itype:8; sid:1;)

@ -0,0 +1,118 @@
%YAML 1.1
---
# Suricata configuration used by the netns IPS tests
# (.github/workflows/netns/*.sh). The af-packet section at the bottom sets
# up the dut interfaces as an IPS bridge; in the NFQUEUE test Suricata is
# started with "-q 0" instead and the af-packet section is not used.
# Suricata configuration file. In addition to the comments describing all
# options in this file, full documentation can be found at:
# https://docs.suricata.io/en/latest/configuration/suricata-yaml.html
# This configuration file was generated by Suricata 9.0.0-dev.
suricata-version: "9.0"
##
## Step 1: Inform Suricata about your network
##
vars:
# more specific is better for alert accuracy and performance
address-groups:
HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
#HOME_NET: "[192.168.0.0/16]"
#HOME_NET: "[10.0.0.0/8]"
#HOME_NET: "[172.16.0.0/12]"
#HOME_NET: "any"
EXTERNAL_NET: "!$HOME_NET"
#EXTERNAL_NET: "any"
HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET"
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
MODBUS_SERVER: "$HOME_NET"
ENIP_CLIENT: "$HOME_NET"
ENIP_SERVER: "$HOME_NET"
port-groups:
HTTP_PORTS: "80"
SHELLCODE_PORTS: "!80"
ORACLE_PORTS: 1521
SSH_PORTS: 22
DNP3_PORTS: 20000
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
GENEVE_PORTS: 6081
VXLAN_PORTS: 4789
TEREDO_PORTS: 3544
SIP_PORTS: "[5060, 5061]"
##
## Step 2: Select outputs to enable
##
# Global stats configuration
stats:
enabled: yes
# The interval field (in seconds) controls the interval at
# which stats are updated in the log.
interval: 8
# Add decode events to stats.
decoder-events: true
# Decoder event prefix in stats. Has been 'decoder' before, but that leads
# to missing events in the eve.stats records. See issue #2225.
#decoder-events-prefix: "decoder.event"
# Add stream events as stats.
stream-events: true
exception-policy:
per-app-proto-errors: true # default: false. True will log errors for
# each app-proto. Warning: VERY verbose
outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
filename: eve.json
types:
- alert
- http:
extended: yes # enable this for extended logging information
- dns
- tls:
extended: yes # enable this for extended logging information
- files:
force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
# sha1 and sha256
#force-hash: [md5]
- drop:
alerts: yes # log alerts that caused drops
flows: all # start or all: 'start' logs only a single drop
verdict: yes
- stats:
totals: yes # stats for all threads merged together
threads: no # per thread stats
deltas: no # include delta values
# Don't log stats counters that are zero. Default: true
#null-values: false # False will NOT log stats counters: 0
- flow
# The test scripts shut Suricata down via the unix command socket.
unix-command:
enabled: auto
# AF_PACKET IPS "bridge": each dut interface copies packets to its peer.
# Settings not given per interface (threads, cluster-type, copy-mode) are
# taken from the "default" interface entry below.
af-packet:
- interface: ptp-dut_client
cluster-id: 1
copy-iface: ptp-dut_server
- interface: ptp-dut_server
cluster-id: 2
copy-iface: ptp-dut_client
- interface: default
threads: 2
cluster-type: cluster_flow
copy-mode: ips

@ -0,0 +1,255 @@
#!/bin/bash
# Script to test live IPS capabilities for NFQUEUE.
#
# Uses 3 network namespaces:
# - client
# - server
# - dut
#
# Dut is where Suricata will run:
#
# [ client ]$clientif - $dutclientif[ dut ]$dutserverif - $serverif[ server ]
#
# By routing packets between the dut interfaces, Suricata becomes the router.
# Packets will be forwarded by the kernel, sent to Suricata via iptables NFQUEUE
# which can then verdict them.
# Call with following arguments:
# 1st: runmode string (single/autofp/workers)
# 2nd: suricata yaml to use
# Exit on first error; trace every command for easier CI log debugging.
set -e
set -x
if [ $# -ne "2" ]; then
echo "ERROR call with 2 args: runmode (single/autofp/workers) and yaml"
exit 1;
fi
RUNMODE=$1
YAML=$2
# dump some info
echo "* printing some diagnostics..."
ip netns list
uname -a
ip r
echo "* printing some diagnostics... done"
# Namespace and interface naming. Unlike the af-packet bridge test, client
# and server are in different subnets and the dut routes between them.
clientns=client
serverns=server
dutns=dut
clientip="10.10.10.2/24"
clientnet="10.10.10.0/24"
serverip='10.10.20.2/24'
servernet="10.10.20.0/24"
dutclientip="10.10.10.1/24"
dutserverip='10.10.20.1/24'
clientif=client
serverif=server
dutclientif=dut_client
dutserverif=dut_server
# Clean up namespaces possibly left behind by a previous run.
echo "* removing old namespaces..."
NAMESPACES=$(ip netns list|cut -d' ' -f1)
for NS in $NAMESPACES; do
if [ $NS = $dutns ] || [ $NS = $clientns ] || [ $NS = $serverns ]; then
ip netns delete $NS
fi
done
echo "* removing old namespaces... done"
# remove eve.json from previous run
if [ -f eve.json ]; then
rm eve.json
fi
# Prefer the release build of suricatasc, fall back to the debug build.
if [ -e ./rust/target/release/suricatasc ]; then
SURICATASC=./rust/target/release/suricatasc
else
SURICATASC=./rust/target/debug/suricatasc
fi
# Overall result: flipped to 1 on any validation failure, returned at exit.
RES=0
# adding namespaces
echo "* creating namespaces..."
ip netns add $clientns
ip netns add $serverns
ip netns add $dutns
echo "* creating namespaces... done"
#diagnostics output
echo "* list namespaces..."
ip netns list
ip netns exec $clientns ip ad
ip netns exec $serverns ip ad
ip netns exec $dutns ip ad
echo "* list namespaces... done"
# create virtual ethernet link between client-dut and server-dut
# These are not yet mapped to a namespace
echo "* creating virtual ethernet devices..."
ip link add ptp-$clientif type veth peer name ptp-$dutclientif
ip link add ptp-$serverif type veth peer name ptp-$dutserverif
echo "* creating virtual ethernet devices...done"
echo "* list interface in global namespace..."
ip link
echo "* list interface in global namespace... done"
echo "* map virtual ethernet interfaces to their namespaces..."
ip link set ptp-$clientif netns $clientns
ip link set ptp-$serverif netns $serverns
ip link set ptp-$dutclientif netns $dutns
ip link set ptp-$dutserverif netns $dutns
echo "* map virtual ethernet interfaces to their namespaces... done"
echo "* list namespaces and interfaces within them..."
ip netns list
ip netns exec $clientns ip ad
ip netns exec $serverns ip ad
ip netns exec $dutns ip ad
echo "* list namespaces and interfaces within them... done"
# bring up interfaces. All interfaces get IP's.
echo "* setup client interface..."
ip netns exec $clientns ip addr add $clientip dev ptp-$clientif
ip netns exec $clientns ip link set ptp-$clientif up
echo "* setup client interface... done"
echo "* setup server interface..."
ip netns exec $serverns ip addr add $serverip dev ptp-$serverif
ip netns exec $serverns ip link set ptp-$serverif up
echo "* setup server interface... done"
# The dut interfaces act as the gateways for the client and server subnets.
echo "* setup dut interfaces..."
ip netns exec $dutns ip addr add $dutclientip dev ptp-$dutclientif
ip netns exec $dutns ip addr add $dutserverip dev ptp-$dutserverif
ip netns exec $dutns ip link set ptp-$dutclientif up
ip netns exec $dutns ip link set ptp-$dutserverif up
echo "* setup dut interfaces... done"
echo "* setup client/server routes..."
# routes:
#
# client can reach servernet through the client side ip of the dut
via_ip=$(echo $dutclientip|cut -f1 -d'/')
ip netns exec $clientns ip route add $servernet via $via_ip dev ptp-$clientif
#
# server can reach clientnet through the server side ip of the dut
via_ip=$(echo $dutserverip|cut -f1 -d'/')
ip netns exec $serverns ip route add $clientnet via $via_ip dev ptp-$serverif
echo "* setup client/server routes... done"
echo "* enabling forwarding in the dut..."
# forward all
# Without --queue-num the NFQUEUE target uses queue 0, matching the
# "-q 0" Suricata invocation below.
ip netns exec $dutns sysctl net.ipv4.ip_forward=1
ip netns exec $dutns iptables -I FORWARD 1 -j NFQUEUE
echo "* enabling forwarding in the dut... done"
# set first rule file
cp .github/workflows/netns/drop-icmp.rules suricata.rules
RULES="suricata.rules"
echo "* starting Suricata in the \"dut\" namespace..."
# Start Suricata in the dut namespace, then SIGINT after 240 seconds. Will
# close it earlier through the unix socket.
timeout --kill-after=300 --preserve-status 240 \
ip netns exec $dutns \
./src/suricata -c $YAML -l ./ -q 0 -v \
--set default-rule-path=. --runmode=$RUNMODE -S $RULES &
SURIPID=$!
sleep 10
echo "* starting Suricata... done"
# Capture on the server side so we can later prove no pings got through.
echo "* starting tshark on in the server namespace..."
timeout --kill-after=240 --preserve-status 180 \
ip netns exec $serverns \
tshark -i ptp-$serverif -T json > tshark-server.json &
TSHARKSERVERPID=$!
sleep 5
echo "* starting tshark on in the server namespace... done, pid $TSHARKSERVERPID"
echo "* starting Caddy..."
# Start Caddy in the server namespace
timeout --kill-after=240 --preserve-status 120 \
ip netns exec $serverns \
caddy file-server --domain 10.10.20.2 --browse &
CADDYPID=$!
sleep 10
echo "* starting Caddy in the \"server\" namespace... done"
# HTTPS fetches must succeed: only ICMP echo requests are dropped by the rule.
echo "* running curl in the \"client\" namespace..."
ip netns exec $clientns \
curl -O https://10.10.20.2/index.html
echo "* running curl in the \"client\" namespace... done"
echo "* running wget in the \"client\" namespace..."
ip netns exec $clientns \
wget https://10.10.20.2/index.html
echo "* running wget in the \"client\" namespace... done"
# Strip the netmask to get a pingable address.
ping_ip=$(echo $serverip|cut -f1 -d'/')
echo "* running ping $ping_ip in the \"client\" namespace..."
# Temporarily allow failure: ping is expected to fail due to the drop rule.
set +e
ip netns exec $clientns \
ping -c 10 $ping_ip
PINGRES=$?
set -e
echo "* running ping in the \"client\" namespace... done"
# pings should have been dropped, so ping reports error
if [ $PINGRES != 1 ]; then
echo "ERROR ping should have failed"
RES=1
fi
# give stats time to get updated
sleep 10
echo "* shutting down tshark..."
kill -INT $TSHARKSERVERPID
wait $TSHARKSERVERPID
echo "* shutting down tshark... done"
# check stats and alerts
# Pull the IPS counters from the last stats record in eve.json.
ACCEPTED=$(jq -c 'select(.event_type == "stats")' ./eve.json | tail -n1 | jq '.stats.ips.accepted')
BLOCKED=$(jq -c 'select(.event_type == "stats")' ./eve.json | tail -n1 | jq '.stats.ips.blocked')
echo "ACCEPTED $ACCEPTED BLOCKED $BLOCKED"
# NOTE(review): the message says "captured" but the condition checks the
# accepted-verdict counter -- consider aligning message and check.
if [ $ACCEPTED -eq 0 ]; then
echo "ERROR no packets captured"
RES=1
fi
# 10 pings were sent; all 10 should be blocked by the drop-icmp rule.
if [ $BLOCKED -ne 10 ]; then
echo "ERROR should have seen 10 blocked"
RES=1
fi
# validate that we didn't receive pings
# icmp.type 8 is an ICMP echo request; none should reach the server side.
SERVER_RECV_PING=$(jq -c '.[]' ./tshark-server.json|jq 'select(._source.layers.icmp."icmp.type"=="8")'|wc -l)
echo "* server pings received check (should be 0): $SERVER_RECV_PING"
if [ $SERVER_RECV_PING -ne 0 ]; then
jq '.[]' ./tshark-server.json | jq 'select(._source.layers.icmp)'
RES=1
fi
echo "* server pings received check... done"
echo "* shutting down..."
kill -INT $CADDYPID
wait $CADDYPID
# Ask Suricata to shut down cleanly through the unix socket, then wait.
ip netns exec $dutns \
${SURICATASC} -c "shutdown" /var/run/suricata/suricata-command.socket
wait $SURIPID
echo "* shutting down... done"
echo "* dumping some stats..."
cat ./eve.json | jq -c 'select(.tls)'|tail -n1|jq
cat ./eve.json | jq -c 'select(.stats)|.stats.ips'|tail -n1|jq
cat ./eve.json | jq
echo "* dumping some stats... done"
echo "* done: $RES"
exit $RES
Loading…
Cancel
Save