Example #1
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded; use_cnt
 *  is adhered to. The "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(void)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];

        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;
        FBLOCK_UNLOCK(fb);

        FlowClearMemory(f, f->protomap);

        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
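The eviction scan above leans on a shared index that each successful scan advances, so the next caller resumes roughly where the previous one stopped instead of re-clearing the top of the table. A minimal, self-contained sketch of that rotating-index pattern follows, assuming C11 atomics; the names (prune_idx, slot_evictable, evict_one) are illustrative, not Suricata's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8u

static atomic_uint prune_idx;                 /* shared across scanning threads */
static bool slot_evictable[TABLE_SIZE] = {
    false, false, false, true, false, true, false, false
};

/* Walk at most TABLE_SIZE slots starting after the shared index;
 * return the evicted slot, or -1 if nothing could be freed. */
static int evict_one(void)
{
    uint32_t idx = atomic_load(&prune_idx) % TABLE_SIZE;
    uint32_t cnt = TABLE_SIZE;

    while (cnt--) {
        if (++idx >= TABLE_SIZE)
            idx = 0;
        if (slot_evictable[idx]) {
            slot_evictable[idx] = false;
            /* record how far we walked so the next scan resumes here */
            atomic_fetch_add(&prune_idx, TABLE_SIZE - cnt);
            return (int)idx;
        }
    }
    return -1;
}

int main(void)
{
    printf("evicted %d\n", evict_one());      /* 3 */
    printf("evicted %d\n", evict_one());      /* 5: resumed past slot 3 */
    printf("evicted %d\n", evict_one());      /* -1: nothing left */
    return 0;
}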
Example #2
/**
 * \internal
 * \brief Forces reassembly for flows that need it.
 *
 * When this function is called we're running in a virtually dead engine,
 * so locking the flows is not strictly required. The reasons it is still
 * done are:
 * - code consistency
 * - silence complaining profilers
 * - allow us to aggressively check using debug validation assertions
 * - be robust in case of future changes
 * - locking overhead is negligible when no other thread fights us
 */
static inline void FlowForceReassemblyForHash(void)
{
    Flow *f;
    TcpSession *ssn;
    int client_ok = 0;
    int server_ok = 0;
    uint32_t idx = 0;

    for (idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];

        PacketPoolWaitForN(9);
        FBLOCK_LOCK(fb);

        /* get the topmost flow from the QUEUE */
        f = fb->head;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            PacketPoolWaitForN(3);

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            ssn = (TcpSession *)f->protoctx;

            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                FLOWLOCK_UNLOCK(f);
                f = f->hnext;
                continue;
            }

            if (FlowForceReassemblyNeedReassembly(f, &server_ok, &client_ok) == 1) {
                FlowForceReassemblyForFlow(f, server_ok, client_ok);
            }

            FLOWLOCK_UNLOCK(f);

            /* next flow in the queue */
            f = f->hnext;
        }
        FBLOCK_UNLOCK(fb);
    }
    return;
}
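The walk above follows a two-level locking discipline: the bucket lock is held for the whole chain traversal, and each flow's own lock is taken only while that flow is inspected. A minimal sketch of that pattern, assuming POSIX threads, with illustrative names (Node, Bucket, walk_bucket) rather than Suricata's:

#include <pthread.h>
#include <stdio.h>

typedef struct Node {
    int value;
    pthread_mutex_t lock;                  /* per-node lock (FLOWLOCK analogue) */
    struct Node *next;
} Node;

typedef struct Bucket {
    pthread_mutex_t lock;                  /* per-bucket lock (FBLOCK analogue) */
    Node *head;
} Bucket;

static void walk_bucket(Bucket *b)
{
    pthread_mutex_lock(&b->lock);          /* protects the chain links */
    for (Node *n = b->head; n != NULL; n = n->next) {
        pthread_mutex_lock(&n->lock);      /* protects this node's contents */
        printf("visiting %d\n", n->value);
        pthread_mutex_unlock(&n->lock);
    }
    pthread_mutex_unlock(&b->lock);
}

int main(void)
{
    Node n2 = { 2, PTHREAD_MUTEX_INITIALIZER, NULL };
    Node n1 = { 1, PTHREAD_MUTEX_INITIALIZER, &n2 };
    Bucket b = { PTHREAD_MUTEX_INITIALIZER, &n1 };
    walk_bucket(&b);                       /* visits 1, then 2 */
    return 0;
}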
Example #3
/* FlowGetFlowFromHash
 *
 * Hash retrieval function for flows. Looks up the hash bucket containing the
 * flow pointer. Then compares the packet with the found flow to see if it is
 * the flow we need. If it isn't, walk the list until the right flow is found.
 *
 * If the flow is not found or the bucket was empty, a new flow is taken from
 * the queue. FlowDequeue() will alloc new flows as long as we stay within our
 * memcap limit.
 *
 * The p->flow pointer is updated to point to the flow.
 *
 * returns a *LOCKED* flow or NULL
 */
Flow *FlowGetFlowFromHash(const Packet *p)
{
    Flow *f = NULL;
    FlowHashCountInit;

    /* get the key to our bucket */
    uint32_t key = FlowGetKey(p);
    /* get our hash bucket and lock it */
    FlowBucket *fb = &flow_hash[key];
    FBLOCK_LOCK(fb);

    SCLogDebug("fb %p fb->head %p", fb, fb->head);

    FlowHashCountIncr;

    /* see if the bucket already has a flow */
    if (fb->head == NULL) {
        f = FlowGetNew(p);
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            FlowHashCountUpdate;
            return NULL;
        }

        /* flow is locked */
        fb->head = f;
        fb->tail = f;

        /* got one, now lock, initialize and return */
        FlowInit(f, p);
        f->fb = fb;

        FBLOCK_UNLOCK(fb);
        FlowHashCountUpdate;
        return f;
    }

    /* ok, we have a flow in the bucket. Let's find out if it is our flow */
    f = fb->head;

    /* see if this is the flow we are looking for */
    if (FlowCompare(f, p) == 0) {
        Flow *pf = NULL; /* previous flow */

        while (f) {
            FlowHashCountIncr;

            pf = f;
            f = f->hnext;

            if (f == NULL) {
                f = pf->hnext = FlowGetNew(p);
                if (f == NULL) {
                    FBLOCK_UNLOCK(fb);
                    FlowHashCountUpdate;
                    return NULL;
                }
                fb->tail = f;

                /* flow is locked */

                f->hprev = pf;

                /* initialize and return */
                FlowInit(f, p);
                f->fb = fb;

                FBLOCK_UNLOCK(fb);
                FlowHashCountUpdate;
                return f;
            }

            if (FlowCompare(f, p) != 0) {
                /* we found our flow, let's put it on top of the
                 * hash list -- this rewards active flows */
                if (f->hnext) {
                    f->hnext->hprev = f->hprev;
                }
                if (f->hprev) {
                    f->hprev->hnext = f->hnext;
                }
                if (f == fb->tail) {
                    fb->tail = f->hprev;
                }

                f->hnext = fb->head;
                f->hprev = NULL;
                fb->head->hprev = f;
                fb->head = f;

                /* found our flow, lock & return */
                FLOWLOCK_WRLOCK(f);
                FBLOCK_UNLOCK(fb);
                FlowHashCountUpdate;
                return f;
            }
        }
    }

    /* lock & return */
    FLOWLOCK_WRLOCK(f);
    FBLOCK_UNLOCK(fb);
    FlowHashCountUpdate;
    return f;
}
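The "rewards active flows" branch above is a classic move-to-front on a doubly linked bucket chain: unlink the matched node, then push it to the head so the next lookup finds it sooner. A minimal, self-contained sketch of just that step; the names (Node, Bucket, move_to_front) are illustrative, not Suricata's.

#include <stdio.h>

typedef struct Node {
    int id;
    struct Node *prev, *next;
} Node;

typedef struct Bucket {
    Node *head, *tail;
} Bucket;

static void move_to_front(Bucket *b, Node *n)
{
    if (b->head == n)
        return;                            /* already at the front */

    /* unlink, mirroring the pointer fixups in the code above */
    if (n->next)
        n->next->prev = n->prev;
    if (n->prev)
        n->prev->next = n->next;
    if (b->tail == n)
        b->tail = n->prev;

    /* push to the front */
    n->next = b->head;
    n->prev = NULL;
    b->head->prev = n;
    b->head = n;
}

int main(void)
{
    Node a = { 1, NULL, NULL }, c = { 2, NULL, NULL }, d = { 3, NULL, NULL };
    a.next = &c; c.prev = &a; c.next = &d; d.prev = &c;
    Bucket b = { &a, &d };

    move_to_front(&b, &d);
    for (Node *n = b.head; n != NULL; n = n->next)
        printf("%d ", n->id);              /* prints: 3 1 2 */
    printf("\n");
    return 0;
}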
Example #4
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded; use_cnt
 *  is adhered to. The "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \param tv thread vars
 *  \param dtv decode thread vars (for flow log api thread data)
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(ThreadVars *tv, DecodeThreadVars *dtv)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];

        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;
        SC_ATOMIC_SET(fb->next_ts, 0);
        FBLOCK_UNLOCK(fb);

        int state = SC_ATOMIC_GET(f->flow_state);
        if (state == FLOW_STATE_NEW)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_NEW;
        else if (state == FLOW_STATE_ESTABLISHED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_ESTABLISHED;
        else if (state == FLOW_STATE_CLOSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_CLOSED;
        else if (state == FLOW_STATE_CAPTURE_BYPASSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;
        else if (state == FLOW_STATE_LOCAL_BYPASSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;

        f->flow_end_flags |= FLOW_END_FLAG_FORCED;

        if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)
            f->flow_end_flags |= FLOW_END_FLAG_EMERGENCY;

        /* invoke flow log api */
        if (dtv && dtv->output_flow_thread_data)
            (void)OutputFlowLog(tv, dtv->output_flow_thread_data, f);

        FlowClearMemory(f, f->protomap);

        FlowUpdateState(f, FLOW_STATE_NEW);

        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
Example #5
/** \brief Get Flow for packet
 *
 * Hash retrieval function for flows. Looks up the hash bucket containing the
 * flow pointer. Then compares the packet with the found flow to see if it is
 * the flow we need. If it isn't, walk the list until the right flow is found.
 *
 * If the flow is not found or the bucket was empty, a new flow is taken from
 * the queue. FlowDequeue() will alloc new flows as long as we stay within our
 * memcap limit.
 *
 * The p->flow pointer is updated to point to the flow.
 *
 *  \param tv thread vars
 *  \param dtv decode thread vars (for flow log api thread data)
 *
 *  \retval f *LOCKED* flow or NULL
 */
Flow *FlowGetFlowFromHash(ThreadVars *tv, DecodeThreadVars *dtv, const Packet *p, Flow **dest)
{
    Flow *f = NULL;

    /* get our hash bucket and lock it */
    const uint32_t hash = p->flow_hash;
    FlowBucket *fb = &flow_hash[hash % flow_config.hash_size];
    FBLOCK_LOCK(fb);

    SCLogDebug("fb %p fb->head %p", fb, fb->head);

    /* see if the bucket already has a flow */
    if (fb->head == NULL) {
        f = FlowGetNew(tv, dtv, p);
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            return NULL;
        }

        /* flow is locked */
        fb->head = f;
        fb->tail = f;

        /* got one, now lock, initialize and return */
        FlowInit(f, p);
        f->flow_hash = hash;
        f->fb = fb;
        FlowUpdateState(f, FLOW_STATE_NEW);

        FlowReference(dest, f);

        FBLOCK_UNLOCK(fb);
        return f;
    }

    /* ok, we have a flow in the bucket. Let's find out if it is our flow */
    f = fb->head;

    /* see if this is the flow we are looking for */
    if (FlowCompare(f, p) == 0) {
        Flow *pf = NULL; /* previous flow */

        while (f) {
            pf = f;
            f = f->hnext;

            if (f == NULL) {
                f = pf->hnext = FlowGetNew(tv, dtv, p);
                if (f == NULL) {
                    FBLOCK_UNLOCK(fb);
                    return NULL;
                }
                fb->tail = f;

                /* flow is locked */

                f->hprev = pf;

                /* initialize and return */
                FlowInit(f, p);
                f->flow_hash = hash;
                f->fb = fb;
                FlowUpdateState(f, FLOW_STATE_NEW);

                FlowReference(dest, f);

                FBLOCK_UNLOCK(fb);
                return f;
            }

            if (FlowCompare(f, p) != 0) {
                /* we found our flow, let's put it on top of the
                 * hash list -- this rewards active flows */
                if (f->hnext) {
                    f->hnext->hprev = f->hprev;
                }
                if (f->hprev) {
                    f->hprev->hnext = f->hnext;
                }
                if (f == fb->tail) {
                    fb->tail = f->hprev;
                }

                f->hnext = fb->head;
                f->hprev = NULL;
                fb->head->hprev = f;
                fb->head = f;

                /* found our flow, lock & return */
                FLOWLOCK_WRLOCK(f);
                if (unlikely(TcpSessionPacketSsnReuse(p, f, f->protoctx) == 1)) {
                    f = TcpReuseReplace(tv, dtv, fb, f, hash, p);
                    if (f == NULL) {
                        FBLOCK_UNLOCK(fb);
                        return NULL;
                    }
                }

                FlowReference(dest, f);

                FBLOCK_UNLOCK(fb);
                return f;
            }
        }
    }

    /* lock & return */
    FLOWLOCK_WRLOCK(f);
    if (unlikely(TcpSessionPacketSsnReuse(p, f, f->protoctx) == 1)) {
        f = TcpReuseReplace(tv, dtv, fb, f, hash, p);
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            return NULL;
        }
    }

    FlowReference(dest, f);

    FBLOCK_UNLOCK(fb);
    return f;
}
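The FlowReference(dest, f) calls above publish the found flow through the caller's pointer (the doc comment's "p->flow pointer is updated"). A minimal sketch of what such a refcounted handout plausibly looks like, assuming the real FlowReference bumps use_cnt and assigns the pointer (an assumption; the names Obj and ObjReference are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

typedef struct Obj {
    atomic_uint use_cnt;                   /* keeps the object from being pruned */
} Obj;

static void ObjReference(Obj **dest, Obj *o)
{
    if (o == NULL)
        return;
    atomic_fetch_add(&o->use_cnt, 1);      /* assumption: mirrors use_cnt++ */
    *dest = o;                             /* hand the caller its reference */
}

static void ObjDeReference(Obj **dest)
{
    if (*dest == NULL)
        return;
    atomic_fetch_sub(&(*dest)->use_cnt, 1);
    *dest = NULL;                          /* drop the caller's handle */
}

int main(void)
{
    Obj o = { 0 };
    Obj *handle = NULL;
    ObjReference(&handle, &o);
    printf("use_cnt=%u\n", atomic_load(&o.use_cnt));   /* 1 */
    ObjDeReference(&handle);
    printf("use_cnt=%u\n", atomic_load(&o.use_cnt));   /* 0 */
    return 0;
}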
Example #6
/**
 * \internal
 * \brief Forces reassembly for flows that need it.
 *
 * When this function is called we're running in a virtually dead engine,
 * so locking the flows is not strictly required. The reasons it is still
 * done are:
 * - code consistency
 * - silence complaining profilers
 * - allow us to aggressively check using debug validation assertions
 * - be robust in case of future changes
 * - locking overhead is negligible when no other thread fights us
 */
static inline void FlowForceReassemblyForHash(void)
{
    Flow *f;
    TcpSession *ssn;
    int client_ok;
    int server_ok;
    int tcp_needs_inspection;

    uint32_t idx = 0;

    /* We use this packet just for reassembly purpose */
    Packet *reassemble_p = PacketGetFromAlloc();
    if (reassemble_p == NULL)
        return;

    for (idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];

        FBLOCK_LOCK(fb);

        /* get the topmost flow from the QUEUE */
        f = fb->head;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            PACKET_RECYCLE(reassemble_p);

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            ssn = (TcpSession *)f->protoctx;

            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                FLOWLOCK_UNLOCK(f);
                f = f->hnext;
                continue;
            }

            /* ah ah!  We have some unattended toserver segments */
            if ((client_ok = StreamHasUnprocessedSegments(ssn, 0)) == 1) {
                StreamTcpThread *stt = SC_ATOMIC_GET(stream_pseudo_pkt_stream_tm_slot->slot_data);

                ssn->client.last_ack = (ssn->client.seg_list_tail->seq +
                        ssn->client.seg_list_tail->payload_len);

                FlowForceReassemblyPseudoPacketSetup(reassemble_p, 1, f, ssn, 1);
                StreamTcpReassembleHandleSegment(stream_pseudo_pkt_stream_TV,
                        stt->ra_ctx, ssn, &ssn->server,
                        reassemble_p, NULL);
                FlowDeReference(&reassemble_p->flow);
                if (StreamTcpReassembleProcessAppLayer(stt->ra_ctx) < 0) {
                    SCLogDebug("shutdown flow timeout "
                               "StreamTcpReassembleProcessAppLayer() erroring "
                               "over something");
                }
            }
            /* oh oh!  We have some unattended toclient segments */
            if ((server_ok = StreamHasUnprocessedSegments(ssn, 1)) == 1) {
                StreamTcpThread *stt = SC_ATOMIC_GET(stream_pseudo_pkt_stream_tm_slot->slot_data);

                ssn->server.last_ack = (ssn->server.seg_list_tail->seq +
                        ssn->server.seg_list_tail->payload_len);

                FlowForceReassemblyPseudoPacketSetup(reassemble_p, 0, f, ssn, 1);
                StreamTcpReassembleHandleSegment(stream_pseudo_pkt_stream_TV,
                        stt->ra_ctx, ssn, &ssn->client,
                        reassemble_p, NULL);
                FlowDeReference(&reassemble_p->flow);
                if (StreamTcpReassembleProcessAppLayer(stt->ra_ctx) < 0) {
                    SCLogDebug("shutdown flow timeout "
                               "StreamTcpReassembleProcessAppLayer() erroring "
                               "over something");
                }
            }

            if (ssn->state >= TCP_ESTABLISHED && ssn->state != TCP_CLOSED)
                tcp_needs_inspection = 1;
            else
                tcp_needs_inspection = 0;

            FLOWLOCK_UNLOCK(f);

            /* insert a pseudo packet in the toserver direction */
            if (client_ok || tcp_needs_inspection)
            {
                FLOWLOCK_WRLOCK(f);
                Packet *p = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 1);
                FLOWLOCK_UNLOCK(f);

                if (p == NULL) {
                    TmqhOutputPacketpool(NULL, reassemble_p);
                    FBLOCK_UNLOCK(fb);
                    return;
                }
                PKT_SET_SRC(p, PKT_SRC_FFR_SHUTDOWN);

                if (stream_pseudo_pkt_detect_prev_TV != NULL) {
                    stream_pseudo_pkt_detect_prev_TV->
                        tmqh_out(stream_pseudo_pkt_detect_prev_TV, p);
                } else {
                    TmSlot *s = stream_pseudo_pkt_detect_tm_slot;
                    while (s != NULL) {
                        TmSlotFunc SlotFunc = SC_ATOMIC_GET(s->SlotFunc);
                        SlotFunc(NULL, p, SC_ATOMIC_GET(s->slot_data), &s->slot_pre_pq,
                                    &s->slot_post_pq);
                        s = s->slot_next;
                    }

                    if (stream_pseudo_pkt_detect_TV != NULL) {
                        stream_pseudo_pkt_detect_TV->
                            tmqh_out(stream_pseudo_pkt_detect_TV, p);
                    } else {
                        TmqhOutputPacketpool(NULL, p);
                    }
                }
            } /* if (ssn->client.seg_list != NULL) */
            if (server_ok || tcp_needs_inspection)
            {
                FLOWLOCK_WRLOCK(f);
                Packet *p = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
                FLOWLOCK_UNLOCK(f);

                if (p == NULL) {
                    TmqhOutputPacketpool(NULL, reassemble_p);
                    FBLOCK_UNLOCK(fb);
                    return;
                }
                PKT_SET_SRC(p, PKT_SRC_FFR_SHUTDOWN);

                if (stream_pseudo_pkt_detect_prev_TV != NULL) {
                    stream_pseudo_pkt_detect_prev_TV->
                        tmqh_out(stream_pseudo_pkt_detect_prev_TV, p);
                } else {
                    TmSlot *s = stream_pseudo_pkt_detect_tm_slot;
                    while (s != NULL) {
                        TmSlotFunc SlotFunc = SC_ATOMIC_GET(s->SlotFunc);
                        SlotFunc(NULL, p, SC_ATOMIC_GET(s->slot_data), &s->slot_pre_pq,
                                    &s->slot_post_pq);
                        s = s->slot_next;
                    }

                    if (stream_pseudo_pkt_detect_TV != NULL) {
                        stream_pseudo_pkt_detect_TV->
                            tmqh_out(stream_pseudo_pkt_detect_TV, p);
                    } else {
                        TmqhOutputPacketpool(NULL, p);
                    }
                }
            } /* if (ssn->server.seg_list != NULL) */

            /* next flow in the queue */
            f = f->hnext;
        } /* while (f != NULL) */
        FBLOCK_UNLOCK(fb);
    }

    PKT_SET_SRC(reassemble_p, PKT_SRC_FFR_SHUTDOWN);
    TmqhOutputPacketpool(NULL, reassemble_p);
    return;
}
Example #7
/**
 * \internal
 * \brief Forces reassembly for flows that need it.
 *
 * When this function is called we're running in a virtually dead engine,
 * so locking the flows is not strictly required. The reasons it is still
 * done are:
 * - code consistency
 * - silence complaining profilers
 * - allow us to aggressively check using debug validation assertions
 * - be robust in case of future changes
 * - locking overhead is negligible when no other thread fights us
 */
static inline void FlowForceReassemblyForHash(void)
{
    Flow *f;
    TcpSession *ssn;
    int client_ok;
    int server_ok;

    uint32_t idx = 0;

    /* We use this packet just for reassembly purpose */
    Packet *reassemble_p = PacketGetFromAlloc();
    if (reassemble_p == NULL)
        return;

    for (idx = 0; idx < flow_config.hash_size; idx++) {
        FlowBucket *fb = &flow_hash[idx];

        FBLOCK_LOCK(fb);

        /* get the topmost flow from the QUEUE */
        f = fb->head;

        /* we need to loop through all the flows in the queue */
        while (f != NULL) {
            PACKET_RECYCLE(reassemble_p);

            FLOWLOCK_WRLOCK(f);

            /* Get the tcp session for the flow */
            ssn = (TcpSession *)f->protoctx;

            /* \todo Also skip flows that shouldn't be inspected */
            if (ssn == NULL) {
                FLOWLOCK_UNLOCK(f);
                f = f->hnext;
                continue;
            }

            (void)FlowForceReassemblyNeedReassembly(f, &server_ok, &client_ok);

            /* ah ah!  We have some unattended toserver segments */
            if (client_ok == STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_REASSEMBLY) {
                StreamTcpThread *stt = SC_ATOMIC_GET(stream_pseudo_pkt_stream_tm_slot->slot_data);

                ssn->client.last_ack = (ssn->client.seg_list_tail->seq +
                        ssn->client.seg_list_tail->payload_len);

                FlowForceReassemblyPseudoPacketSetup(reassemble_p, 1, f, ssn, 1);
                StreamTcpReassembleHandleSegment(stream_pseudo_pkt_stream_TV,
                        stt->ra_ctx, ssn, &ssn->server,
                        reassemble_p, NULL);
                FlowDeReference(&reassemble_p->flow);
            }
            /* oh oh!  We have some unattended toclient segments */
            if (server_ok == STREAM_HAS_UNPROCESSED_SEGMENTS_NEED_REASSEMBLY) {
                StreamTcpThread *stt = SC_ATOMIC_GET(stream_pseudo_pkt_stream_tm_slot->slot_data);

                ssn->server.last_ack = (ssn->server.seg_list_tail->seq +
                        ssn->server.seg_list_tail->payload_len);

                FlowForceReassemblyPseudoPacketSetup(reassemble_p, 0, f, ssn, 1);
                StreamTcpReassembleHandleSegment(stream_pseudo_pkt_stream_TV,
                        stt->ra_ctx, ssn, &ssn->client,
                        reassemble_p, NULL);
                FlowDeReference(&reassemble_p->flow);
            }

            FLOWLOCK_UNLOCK(f);

            /* insert a pseudo packet in the toserver direction */
            if (client_ok) {
                FLOWLOCK_WRLOCK(f);
                Packet *p = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 1);
                FLOWLOCK_UNLOCK(f);

                if (p == NULL) {
                    TmqhOutputPacketpool(NULL, reassemble_p);
                    FBLOCK_UNLOCK(fb);
                    return;
                }
                PKT_SET_SRC(p, PKT_SRC_FFR_SHUTDOWN);

                if (stream_pseudo_pkt_detect_prev_TV != NULL) {
                    stream_pseudo_pkt_detect_prev_TV->
                        tmqh_out(stream_pseudo_pkt_detect_prev_TV, p);
                } else {
                    TmSlot *s = stream_pseudo_pkt_detect_tm_slot;
                    while (s != NULL) {
                        TmSlotFunc SlotFunc = SC_ATOMIC_GET(s->SlotFunc);
                        SlotFunc(NULL, p, SC_ATOMIC_GET(s->slot_data), &s->slot_pre_pq,
                                    &s->slot_post_pq);
                        s = s->slot_next;
                    }

                    if (stream_pseudo_pkt_detect_TV != NULL) {
                        stream_pseudo_pkt_detect_TV->
                            tmqh_out(stream_pseudo_pkt_detect_TV, p);
                    } else {
                        TmqhOutputPacketpool(NULL, p);
                    }
                }
            }
            if (server_ok) {
                FLOWLOCK_WRLOCK(f);
                Packet *p = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
                FLOWLOCK_UNLOCK(f);

                if (p == NULL) {
                    TmqhOutputPacketpool(NULL, reassemble_p);
                    FBLOCK_UNLOCK(fb);
                    return;
                }
                PKT_SET_SRC(p, PKT_SRC_FFR_SHUTDOWN);

                if (stream_pseudo_pkt_detect_prev_TV != NULL) {
                    stream_pseudo_pkt_detect_prev_TV->
                        tmqh_out(stream_pseudo_pkt_detect_prev_TV, p);
                } else {
                    TmSlot *s = stream_pseudo_pkt_detect_tm_slot;
                    while (s != NULL) {
                        TmSlotFunc SlotFunc = SC_ATOMIC_GET(s->SlotFunc);
                        SlotFunc(NULL, p, SC_ATOMIC_GET(s->slot_data), &s->slot_pre_pq,
                                    &s->slot_post_pq);
                        s = s->slot_next;
                    }

                    if (stream_pseudo_pkt_detect_TV != NULL) {
                        stream_pseudo_pkt_detect_TV->
                            tmqh_out(stream_pseudo_pkt_detect_TV, p);
                    } else {
                        TmqhOutputPacketpool(NULL, p);
                    }
                }
            }

            /* next flow in the queue */
            f = f->hnext;
        }
        FBLOCK_UNLOCK(fb);
    }

    PKT_SET_SRC(reassemble_p, PKT_SRC_FFR_SHUTDOWN);
    TmqhOutputPacketpool(NULL, reassemble_p);
    return;
}