flow: remove use_cnt

Packets only ever reference the flow while holding its lock. This
means that any code possibly evicting the flow will have to wait
for the existing users to complete their work. Therefore the use_cnt
no longer serves any function and can be removed.
pull/8495/head
Victor Julien 3 years ago
parent a2dc9a40e7
commit 7951d8a14f
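
The commit message relies on an invariant worth spelling out: a packet worker only
touches a Flow while holding that flow's lock, so any evictor that takes the same
lock is already serialized against all users. Below is a minimal standalone sketch
of that idea; it assumes a plain pthread rwlock behind the FLOWLOCK_* macros, and
the WorkerHandlePacket/EvictFlow names are illustrative, not taken from the tree.

    #include <pthread.h>

    typedef struct Flow_ {
        pthread_rwlock_t r;
        /* ... flow state ... */
    } Flow;

    #define FLOWLOCK_WRLOCK(f) pthread_rwlock_wrlock(&(f)->r)
    #define FLOWLOCK_UNLOCK(f) pthread_rwlock_unlock(&(f)->r)

    /* A worker only references the flow while holding its lock. */
    static void WorkerHandlePacket(Flow *f)
    {
        FLOWLOCK_WRLOCK(f);
        /* ... process the packet against f ... */
        FLOWLOCK_UNLOCK(f);
    }

    /* An evictor taking the same lock waits for every in-flight user;
     * once the lock is held, access is exclusive, so a reference
     * counter check such as "if (f->use_cnt == 0)" adds nothing. */
    static void EvictFlow(Flow *f)
    {
        FLOWLOCK_WRLOCK(f);
        /* ... unlink f from its hash row / move it to a work queue ... */
        FLOWLOCK_UNLOCK(f);
    }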

@@ -840,23 +840,18 @@ Flow *FlowGetFlowFromHash(ThreadVars *tv, FlowLookupStruct *fls, Packet *p, Flow
                 FlowIsTimedOut(f, (uint32_t)SCTIME_SECS(p->ts), emerg));
         if (timedout) {
             FLOWLOCK_WRLOCK(f);
-            if (likely(f->use_cnt == 0)) {
-                next_f = f->next;
-                MoveToWorkQueue(tv, fls, fb, f, prev_f);
-                FLOWLOCK_UNLOCK(f);
-                goto flow_removed;
-            }
+            next_f = f->next;
+            MoveToWorkQueue(tv, fls, fb, f, prev_f);
+            FLOWLOCK_UNLOCK(f);
+            goto flow_removed;
         } else if (FlowCompare(f, p) != 0) {
             FLOWLOCK_WRLOCK(f);
             /* found a matching flow that is not timed out */
             if (unlikely(TcpSessionPacketSsnReuse(p, f, f->protoctx) == 1)) {
                 Flow *new_f = TcpReuseReplace(tv, fls, fb, f, hash, p);
-                if (likely(f->use_cnt == 0)) {
-                    if (prev_f == NULL) /* if we have no prev it means new_f is now our prev */
-                        prev_f = new_f;
-                    MoveToWorkQueue(tv, fls, fb, f, prev_f); /* evict old flow */
-                }
+                if (prev_f == NULL) /* if we have no prev it means new_f is now our prev */
+                    prev_f = new_f;
+                MoveToWorkQueue(tv, fls, fb, f, prev_f); /* evict old flow */
                 FLOWLOCK_UNLOCK(f); /* unlock old replaced flow */
                 if (new_f == NULL) {
@@ -1150,8 +1145,8 @@ static inline bool StillAlive(const Flow *f, const SCTime_t ts)
  *
  *  Called in conditions where the spare queue is empty and memcap is reached.
  *
- *  Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
- *  is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
+ *  Walks the hash until a flow can be freed. Timeouts are disregarded.
+ *  "flow_prune_idx" atomic int makes sure we don't start at the
  *  top each time since that would clear the top of the hash leading to longer
  *  and longer search times under high pressure (observed).
  *
@@ -1195,15 +1190,6 @@ static Flow *FlowGetUsedFlow(ThreadVars *tv, DecodeThreadVars *dtv, const SCTime
             continue;
         }
-        /** never prune a flow that is used by a packet or stream msg
-         *  we are currently processing in one of the threads */
-        if (f->use_cnt > 0) {
-            STATSADDUI64(counter_flow_get_used_eval_busy, 1);
-            FBLOCK_UNLOCK(fb);
-            FLOWLOCK_UNLOCK(f);
-            continue;
-        }
         if (StillAlive(f, ts)) {
             STATSADDUI64(counter_flow_get_used_eval_reject, 1);
             FBLOCK_UNLOCK(fb);

@@ -348,12 +348,9 @@ static void FlowManagerHashRowTimeout(FlowManagerTimeoutThread *td, Flow *f, SCT
         /* never prune a flow that is used by a packet we
          * are currently processing in one of the threads */
-        if (f->use_cnt > 0 || !FlowBypassedTimeout(f, ts, counters)) {
+        if (!FlowBypassedTimeout(f, ts, counters)) {
             FLOWLOCK_UNLOCK(f);
             prev_f = f;
-            if (f->use_cnt > 0) {
-                counters->flows_timeout_inuse++;
-            }
             f = f->next;
             continue;
         }
@@ -384,8 +381,6 @@ static void FlowManagerHashRowClearEvictedList(
         f->next = NULL;
         f->fb = NULL;
-        DEBUG_VALIDATE_BUG_ON(f->use_cnt > 0);
         FlowQueuePrivateAppendFlow(&td->aside_queue, f);
         /* flow is still locked in the queue */
@@ -545,7 +540,7 @@ static uint32_t FlowManagerHashRowCleanup(Flow *f, FlowQueuePrivate *recycle_q,
         }
         f->flow_end_flags |= FLOW_END_FLAG_SHUTDOWN;
-        /* no one is referring to this flow, use_cnt 0, removed from hash
+        /* no one is referring to this flow, removed from hash
          * so we can unlock it and move it to the recycle queue. */
         FLOWLOCK_UNLOCK(f);
         FlowQueuePrivateAppendFlow(recycle_q, f);

@@ -48,7 +48,6 @@
         (f)->vlan_idx = 0; \
         (f)->next = NULL; \
         (f)->flow_state = 0; \
-        (f)->use_cnt = 0; \
         (f)->tenant_id = 0; \
         (f)->parent_id = 0; \
         (f)->probing_parser_toserver_alproto_masks = 0; \
@@ -94,7 +93,6 @@
         (f)->timeout_at = 0; \
         (f)->timeout_policy = 0; \
         (f)->flow_state = 0; \
-        (f)->use_cnt = 0; \
         (f)->tenant_id = 0; \
         (f)->parent_id = 0; \
         (f)->probing_parser_toserver_alproto_masks = 0; \

@@ -189,10 +189,7 @@ static void CheckWorkQueue(ThreadVars *tv, FlowWorkerThreadData *fw,
             }
         }
-        /* this should not be possible */
-        BUG_ON(f->use_cnt > 0);
-        /* no one is referring to this flow, use_cnt 0, removed from hash
+        /* no one is referring to this flow, removed from hash
          * so we can unlock it and pass it to the flow recycler */
         if (fw->output_thread_flow != NULL)

@@ -699,7 +699,6 @@ void FlowShutdown(void)
     for (uint32_t u = 0; u < flow_config.hash_size; u++) {
         f = flow_hash[u].head;
         while (f) {
-            DEBUG_VALIDATE_BUG_ON(f->use_cnt != 0);
             Flow *n = f->next;
             uint8_t proto_map = FlowGetProtoMapping(f->proto);
             FlowClearMemory(f, proto_map);
@@ -708,7 +707,6 @@ void FlowShutdown(void)
         }
         f = flow_hash[u].evicted;
         while (f) {
-            DEBUG_VALIDATE_BUG_ON(f->use_cnt != 0);
             Flow *n = f->next;
             uint8_t proto_map = FlowGetProtoMapping(f->proto);
             FlowClearMemory(f, proto_map);

@@ -379,12 +379,6 @@ typedef struct Flow_
     uint8_t proto;
     uint8_t recursion_level;
     uint16_t vlan_id[2];
-    /** how many references exist to this flow *right now*
-     *
-     *  On receiving a packet the counter is incremented while the flow
-     *  bucked is locked, which is also the case on timeout pruning.
-     */
-    FlowRefCount use_cnt;
     uint8_t vlan_idx;
@@ -644,33 +638,7 @@ static inline void FlowSetNoPayloadInspectionFlag(Flow *f)
     SCReturn;
 }
-/**
- *  \brief increase the use count of a flow
- *
- *  \param f flow to decrease use count for
- */
-static inline void FlowIncrUsecnt(Flow *f)
-{
-    if (f == NULL)
-        return;
-    f->use_cnt++;
-}
-/**
- *  \brief decrease the use count of a flow
- *
- *  \param f flow to decrease use count for
- */
-static inline void FlowDecrUsecnt(Flow *f)
-{
-    if (f == NULL)
-        return;
-    f->use_cnt--;
-}
-/** \brief Reference the flow, bumping the flows use_cnt
+/** \brief Reference the flow
  *  \note This should only be called once for a destination
  *        pointer */
 static inline void FlowReference(Flow **d, Flow *f)
@@ -682,7 +650,6 @@ static inline void FlowReference(Flow **d, Flow *f)
         if (*d == f)
             return;
 #endif
-        FlowIncrUsecnt(f);
         *d = f;
     }
 }
@@ -690,7 +657,6 @@ static inline void FlowReference(Flow **d, Flow *f)
 static inline void FlowDeReference(Flow **d)
 {
     if (likely(*d != NULL)) {
-        FlowDecrUsecnt(*d);
         *d = NULL;
     }
 }

@@ -238,7 +238,7 @@ void StreamTcpSessionCleanup(TcpSession *ssn)
  *
  *  This function is called when the flow is destroyed, so it should free
  *  *everything* related to the tcp session. So including the app layer
- *  data. We are guaranteed to only get here when the flow's use_cnt is 0.
+ *  data.
  *
  *  \param ssn Void ptr to the ssn.
  */
@@ -269,11 +269,6 @@ void StreamTcpSessionClear(void *ssnptr)
 /**
  *  \brief Function to return the stream segments back to the pool.
  *
- *  We don't clear out the app layer storage here as that is under protection
- *  of the "use_cnt" reference counter in the flow. This function is called
- *  when the use_cnt is always at least 1 (this pkt has incremented the flow
- *  use_cnt itself), so we don't bother.
- *
  *  \param p Packet used to identify the stream.
  */
 void StreamTcpSessionPktFree (Packet *p)

@@ -908,7 +908,6 @@ uint32_t UTHBuildPacketOfFlows(uint32_t start, uint32_t end, uint8_t dir)
         }
         FlowHandlePacket(NULL, &fls, p);
         if (p->flow != NULL) {
-            p->flow->use_cnt = 0;
             FLOWLOCK_UNLOCK(p->flow);
         }