mirror of https://github.com/OISF/suricata
Remove all cuda related code in the engine except for the cuda api wrappers
parent
e2a6cfb6a6
commit
b787da5643
File diff suppressed because it is too large
Load Diff
@ -1,145 +0,0 @@
|
||||
/**
 * Copyright (c) 2010 Open Information Security Foundation.
 *
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
 */

#ifndef __CUDA_PACKET_BATCHER_H__
#define __CUDA_PACKET_BATCHER_H__

#include "suricata-common.h"

/* compile in, only if we have CUDA enabled on this machine */
#ifdef __SC_CUDA_SUPPORT__

#include "util-cuda.h"

/* The minimum number of packets that we allot the buffer for.  We will make
 * this user configurable (yaml) based on the traffic they expect.  Either way,
 * for a low/medium traffic network with occasional sgh matches, we shouldn't
 * be enabling cuda.  We will only end up hurting performance. */
#define SC_CUDA_PB_MIN_NO_OF_PACKETS 4000

/* the maximum payload size we're sending to the card (defined in decode.h) */
#define SC_CUDA_PB_MAX_PAYLOAD_SIZE CUDA_MAX_PAYLOAD_SIZE

/**
 * \brief Implement the template SCDQGenericQData to transfer the cuda
 *        packet buffer from the cuda batcher thread to the dispatcher
 *        thread using the queue SCDQDataQueue.
 */
typedef struct SCCudaPBPacketsBuffer_ {
    /* these members from the template SCDQGenericQData have to be
     * compulsorily implemented */
    struct SCDQGenericQData_ *next;
    struct SCDQGenericQData_ *prev;
    /* if we want to consider this pointer as the head of a list, this var
     * holds the number of elements in the list */
    //uint16_t len;
    /* in case this data instance is the head of a list, we can refer to the
     * bottommost instance directly using this var */
    //struct SCDQGenericaQData *bot;

    /* our own members from here on */

    /* current count of packets held in packets_buffer. nop = no of packets */
    uint32_t nop_in_buffer;
    /* the packets buffer.  We will assign buffer for SC_CUDA_PB_MIN_NO_OF_PACKETS
     * packets.  Basically the size of this buffer would be
     * SC_CUDA_PB_MIN_NO_OF_PACKETS * sizeof(SCCudaPBPacketDataForGPU), so that
     * we can hold a minimum of SC_CUDA_PB_MIN_NO_OF_PACKETS packets */
    uint8_t *packets_buffer;
    /* length of data buffered so far in packets_buffer, which would be sent
     * to the GPU.  We will need this to copy the buffered data from the
     * packets_buffer here on the host, to the buffer on the GPU */
    uint32_t packets_buffer_len;
    /* packet offset within the packets_buffer.  Each packet would be stored in
     * packets_buffer at a particular offset.  This buffer indicates the
     * offset of a packet inside the packets buffer.  We allot space to hold
     * offsets for SC_CUDA_PB_MIN_NO_OF_PACKETS packets.
     * \todo change it to hold offsets for more than SC_CUDA_PB_MIN_NO_OF_PACKETS
     *       when we use the buffer to hold packets based on the remaining size in
     *       the buffer rather than on a fixed limit like SC_CUDA_PB_MIN_NO_OF_PACKETS */
    uint32_t *packets_offset_buffer;

    /* the total packet payload lengths buffered so far.  We will need this to
     * transfer the total length of the results buffer that has to be transferred
     * back from the gpu */
    uint32_t packets_total_payload_len;
    /* the payload offsets for the different payload lengths buffered in.  For
     * example, if we buffer 4 packets of lengths 3, 4, 5, 6, we will store four
     * offsets in the buffer {0, 3, 7, 12, 18} */
    uint32_t *packets_payload_offset_buffer;

    /* packet addresses for all the packets buffered in the packets_buffer.  We
     * will allot space to hold packet addresses for SC_CUDA_PB_MIN_NO_OF_PACKETS.
     * We will need this so that the cuda mpm b2g dispatcher thread can inform
     * and store the b2g cuda mpm results for the packet */
    Packet **packets_address_buffer;
} SCCudaPBPacketsBuffer;

/**
 * \brief Structure for each packet that is being batched to the GPU.
 */
typedef struct SCCudaPBPacketDataForGPU_ {
    /* holds B2gCudaCtx->m */
    unsigned int m;
    /* holds B2gCudaCtx->cuda_B2g */
    CUdeviceptr table;
    /* holds the length of the payload */
    unsigned int payload_len;
    /* holds the payload.  While we actually store the payload in the buffer,
     * we may not end up using the entire 1480 bytes if the payload is smaller */
    uint8_t payload[SC_CUDA_PB_MAX_PAYLOAD_SIZE];
} SCCudaPBPacketDataForGPU;

/**
 * \brief Same as struct SCCudaPBPacketDataForGPU_ except for the payload part.
 *        We will need this for calculating the size of the non-payload part
 *        of the packet data to be buffered.
 */
typedef struct SCCudaPBPacketDataForGPUNonPayload_ {
    /* holds B2gCudaCtx->m */
    unsigned int m;
    /* holds B2gCudaCtx->cuda_B2g */
    CUdeviceptr table;
    /* holds the length of the payload */
    unsigned int payload_len;
} SCCudaPBPacketDataForGPUNonPayload;

/**
 * \brief The cuda packet batcher threading context.
 */
typedef struct SCCudaPBThreadCtx_ {
    /* we need the detection engine context to retrieve the sgh while we start
     * receiving and batching the packets */
    DetectEngineCtx *de_ctx;

    /* packets buffer currently in use inside the cuda batcher thread */
    SCCudaPBPacketsBuffer *curr_pb;
} SCCudaPBThreadCtx;

/* allocate / free a packets buffer instance */
SCCudaPBPacketsBuffer *SCCudaPBAllocSCCudaPBPacketsBuffer(void);
void SCCudaPBDeAllocSCCudaPBPacketsBuffer(SCCudaPBPacketsBuffer *);

/* batcher lifetime management.  NOTE(review): "Threshhold" spelling is part of
 * the public name and must be kept for callers. */
void SCCudaPBSetBufferPacketThreshhold(uint32_t);
void SCCudaPBCleanUpQueuesAndBuffers(void);
void SCCudaPBSetUpQueuesAndBuffers(void);
void SCCudaPBKillBatchingPackets(void);

/* thread-module (Tm) entry points for the batcher */
TmEcode SCCudaPBBatchPackets(ThreadVars *, Packet *, void *, PacketQueue *, PacketQueue *);
TmEcode SCCudaPBThreadInit(ThreadVars *, void *, void **);
TmEcode SCCudaPBThreadDeInit(ThreadVars *, void *);
void SCCudaPBThreadExitStats(ThreadVars *, void *);
void SCCudaPBRegisterTests(void);

void TmModuleCudaPacketBatcherRegister(void);

void *SCCudaPBTmThreadsSlot1(void *);

void SCCudaPBRunningTests(int);
void SCCudaPBSetProfile(char *);

#endif /* __SC_CUDA_SUPPORT__ */

#endif /* __CUDA_PACKET_BATCHER_H__ */
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,119 +0,0 @@
|
||||
/* Copyright (C) 2007-2010 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file Provides cuda utility functions.
 *
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
 */

/* Include guard placed outermost so that it protects the whole header,
 * including the <cuda.h> include.  The original had the guard opened after
 * the __SC_CUDA_SUPPORT__ check and the include, and closed it with a
 * mismatched comment (__UTIL_CUDA_HANDLERS__); this matches the sibling
 * cuda headers' layout. */
#ifndef __UTIL_MPM_CUDA_HANDLERS_H__
#define __UTIL_MPM_CUDA_HANDLERS_H__

/* macro decides if cuda is enabled for the platform or not */
#ifdef __SC_CUDA_SUPPORT__

#include <cuda.h>

/* module types used to key cuda-handler registrations */
typedef enum {
    SC_CUDA_HL_MTYPE_RULE_NONE = -1,
    SC_CUDA_HL_MTYPE_RULE_CONTENTS = 0,
    SC_CUDA_HL_MTYPE_RULE_URICONTENTS,
    SC_CUDA_HL_MTYPE_APP_LAYER,
    SC_CUDA_HL_MTYPE_RULE_CUSTOM,
    SC_CUDA_HL_MTYPE_MAX,
} SCCudaHlModuleType;

/* a named device pointer registered by a module */
typedef struct SCCudaHlModuleDevicePointer_ {
    /* device pointer name.  This is a primary key.  For the same module you
     * can't register different device pointers under the same name */
    char *name;
    CUdeviceptr d_ptr;

    struct SCCudaHlModuleDevicePointer_ *next;
} SCCudaHlModuleDevicePointer;

/* a CUmodule held by a module, plus the device pointers created from it */
typedef struct SCCudaHlModuleCUmodule_ {
    /* Handle for this CUmodule.  This has to be first obtained from the
     * call to SCCudaHlGetCudaModule() or SCCudaHlGetCudaModuleFromFile() */
    int cuda_module_handle;

    CUmodule cuda_module;
    SCCudaHlModuleDevicePointer *device_ptrs;

    struct SCCudaHlModuleCUmodule_ *next;
} SCCudaHlModuleCUmodule;

/* per-module state kept by the cuda handlers */
typedef struct SCCudaHlModuleData_ {
    /* The unique module handle.  This has to be first obtained from the
     * call to SCCudaHlGetUniqueHandle() */
    const char *name;
    int handle;

    CUcontext cuda_context;
    SCCudaHlModuleCUmodule *cuda_modules;
    void *(*SCCudaHlDispFunc)(void *);

    struct SCCudaHlModuleData_ *next;
} SCCudaHlModuleData;

/**
 * \brief Used to hold the cuda configuration from our conf yaml file
 */
typedef struct SCCudaHlCudaProfile_ {
    /* profile name.  Should be unique */
    char *name;
    /* the data associated with this profile */
    void *data;

    struct SCCudaHlCudaProfile_ *next;
} SCCudaHlCudaProfile;

/* yaml profile handling */
void SCCudaHlGetYamlConf(void);
void *SCCudaHlGetProfile(char *);
void SCCudaHlCleanProfiles(void);
void SCCudaHlBackupRegisteredProfiles(void);
void SCCudaHlRestoreBackupRegisteredProfiles(void);

/* context / module / device-pointer accessors */
int SCCudaHlGetCudaContext(CUcontext *, char *, int);
int SCCudaHlGetCudaModule(CUmodule *, const char *, int);
int SCCudaHlGetCudaModuleFromFile(CUmodule *, const char *, int);
int SCCudaHlGetCudaDevicePtr(CUdeviceptr *, const char *, size_t, void *, int, int);
int SCCudaHlFreeCudaDevicePtr(const char *, int, int);
int SCCudaHlRegisterDispatcherFunc(void *(*SCCudaHlDispFunc)(void *), int);

SCCudaHlModuleData *SCCudaHlGetModuleData(uint8_t);
const char *SCCudaHlGetModuleName(int);
int SCCudaHlGetModuleHandle(const char *);

/* module registration */
int SCCudaHlRegisterModule(const char *);
int SCCudaHlDeRegisterModule(const char *);
void SCCudaHlDeRegisterAllRegisteredModules(void);

int SCCudaHlPushCudaContextFromModule(const char *);

/* unittest environment helpers */
int SCCudaHlTestEnvCudaContextInit(void);
int SCCudaHlTestEnvCudaContextDeInit(void);

/* dispatcher entry points */
void SCCudaHlProcessPacketWithDispatcher(Packet *, DetectEngineThreadCtx *,
                                         void *);
void SCCudaHlProcessUriWithDispatcher(uint8_t *, uint16_t, DetectEngineThreadCtx *,
                                      void *);

#endif /* __SC_CUDA_SUPPORT__ */

#endif /* __UTIL_MPM_CUDA_HANDLERS_H__ */
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,144 +0,0 @@
|
||||
/* Copyright (C) 2007-2010 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 * \author Anoop Saldanha <anoopsaldanha@gmail.com>
 * \author Martin Beyer <martin.beyer@marasystems.de>
 */

#ifndef __UTIL_MPM_B2G_CUDA_H__
#define __UTIL_MPM_B2G_CUDA_H__

#ifdef __SC_CUDA_SUPPORT__

#include <cuda.h>
#include "decode.h"
#include "util-mpm.h"
#include "util-bloomfilter.h"

/* B2g algorithm parameters for the cuda variant */
#define B2G_CUDA_HASHSHIFT 4
#define B2G_CUDA_TYPE uint32_t
#define B2G_CUDA_WORD_SIZE 32
#define B2G_CUDA_Q 2

/* 2-byte hash used to index the B2G table */
#define B2G_CUDA_HASH16(a, b) (((a) << B2G_CUDA_HASHSHIFT) | (b))

#define B2G_CUDA_SEARCHFUNC B2gCudaSearchBNDMq
#define B2G_CUDA_SEARCHFUNC_NAME "B2gCudaSearchBNDMq"

/* a single pattern as stored by the b2g cuda matcher */
typedef struct B2gCudaPattern_ {
    uint8_t flags;
    /** \todo we're limited to 32/64 byte lengths, uint8_t would be fine here */
    uint16_t len;
    /* case sensitive */
    uint8_t *cs;
    /* case INsensitive */
    uint8_t *ci;
    struct B2gCudaPattern_ *next;
    uint32_t id;
    uint8_t *original_pat;
} B2gCudaPattern;

/* hash bucket entry pointing at a pattern index */
typedef struct B2gCudaHashItem_ {
    uint16_t idx;
    uint8_t flags;
    struct B2gCudaHashItem_ *nxt;
} B2gCudaHashItem;

/* the b2g cuda mpm context */
typedef struct B2gCudaCtx_ {
    /* unique handle given by the cuda-handlers API, which indicates the module
     * in the engine that is holding this B2g_Cuda_Ctx */
    int module_handle;

    /* cuda device pointer to B2gCudaCtx->B2G */
    CUdeviceptr cuda_B2G;

    B2G_CUDA_TYPE *B2G;
    B2G_CUDA_TYPE m;
    BloomFilter **bloom;
    /* array containing the minimal length of the patterns in a hash bucket.
     * Used for the BloomFilter. */
    uint8_t *pminlen;
    /* pattern arrays */
    B2gCudaPattern **parray;

    uint16_t pat_1_cnt;
#ifdef B2G_CUDA_SEARCH2
    uint16_t pat_2_cnt;
#endif
    uint16_t pat_x_cnt;

    uint32_t hash_size;
    B2gCudaHashItem **hash;
    B2gCudaHashItem hash1[256];
#ifdef B2G_CUDA_SEARCH2
    B2gCudaHashItem **hash2;
#endif

    /* hash used during ctx initialization */
    B2gCudaPattern **init_hash;

    uint8_t s0;

    /* we store our own multi byte search func ptr here for B2gCudaSearch1 */
    uint32_t (*Search)(struct MpmCtx_ *, struct MpmThreadCtx_ *,
                       PatternMatcherQueue *, uint8_t *, uint16_t);

    /* we store our own multi byte search func ptr here for B2gCudaSearch2 */
    uint32_t (*MBSearch2)(struct MpmCtx_ *, struct MpmThreadCtx_ *,
                          PatternMatcherQueue *, uint8_t *, uint16_t);
    uint32_t (*MBSearch)(struct MpmCtx_ *, struct MpmThreadCtx_ *,
                         PatternMatcherQueue *, uint8_t *, uint16_t);
} B2gCudaCtx;

/* per-thread stats; only populated when B2G_CUDA_COUNTERS is defined */
typedef struct B2gCudaThreadCtx_ {
#ifdef B2G_CUDA_COUNTERS
    uint32_t stat_pminlen_calls;
    uint32_t stat_pminlen_total;
    uint32_t stat_bloom_calls;
    uint32_t stat_bloom_hits;
    uint32_t stat_calls;
    uint32_t stat_m_total;
    uint32_t stat_d0;
    uint32_t stat_d0_hashloop;
    uint32_t stat_loop_match;
    uint32_t stat_loop_no_match;
    uint32_t stat_num_shift;
    uint32_t stat_total_shift;
#endif /* B2G_CUDA_COUNTERS */
} B2gCudaThreadCtx;

void MpmB2gCudaRegister(void);
void TmModuleCudaMpmB2gRegister(void);

/* RC = the rules-content dispatcher thread */
int B2gCudaStartDispatcherThreadRC(const char *);
void B2gCudaKillDispatcherThreadRC(void);
int B2gCudaResultsPostProcessing(Packet *, MpmCtx *, MpmThreadCtx *,
                                 PatternMatcherQueue *);
uint32_t B2gCudaSearch1(MpmCtx *, MpmThreadCtx *, PatternMatcherQueue *,
                        uint8_t *, uint16_t);
#ifdef B2G_CUDA_SEARCH2
uint32_t B2gCudaSearch2(MpmCtx *, MpmThreadCtx *, PatternMatcherQueue *,
                        uint8_t *, uint16_t);
#endif

#endif /* __SC_CUDA_SUPPORT__ */

#endif /* __UTIL_MPM_B2G_CUDA_H__ */
|
||||
Loading…
Reference in New Issue