/**
 * \brief Decode callback for packets read from a pcap file.
 *
 * Updates the per-thread performance counters, periodically wakes the
 * flow manager thread, sets the engine time from the packet timestamp
 * and hands the packet to the datalink-specific decoder.
 *
 * \param tv     thread vars
 * \param p      packet to decode
 * \param data   DecodeThreadVars for this thread
 * \param pq     packet queue the decoder may enqueue new packets on
 * \param postpq unused here
 *
 * \retval TM_ECODE_OK always
 */
TmEcode DecodePcapFile(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, PacketQueue *postpq)
{
    SCEnter();
    DecodeThreadVars *dtv = (DecodeThreadVars *)data;

    /* update counters */
    SCPerfCounterIncr(dtv->counter_pkts, tv->sc_perf_pca);
    SCPerfCounterIncr(dtv->counter_pkts_per_sec, tv->sc_perf_pca);
    SCPerfCounterAddUI64(dtv->counter_bytes, tv->sc_perf_pca, GET_PKT_LEN(p));
    SCPerfCounterAddUI64(dtv->counter_avg_pkt_size, tv->sc_perf_pca, GET_PKT_LEN(p));
    SCPerfCounterSetUI64(dtv->counter_max_pkt_size, tv->sc_perf_pca, GET_PKT_LEN(p));

    /* When reading a file the packet clock can run much faster than wall
     * clock, so the flow manager is woken explicitly when the packet time
     * jumps backwards or advances by more than a minute.
     *
     * Bug fix: tv_usec holds MICROseconds (0..999999). The previous
     * divisor of 1000.0 produced a "fractional" part of up to ~1000
     * seconds, which made curr_ts jitter wildly and triggered spurious
     * flow manager wakeups on nearly every packet. */
    double curr_ts = p->ts.tv_sec + p->ts.tv_usec / 1000000.0;
    if (curr_ts < prev_signaled_ts || (curr_ts - prev_signaled_ts) > 60.0) {
        prev_signaled_ts = curr_ts;
        FlowWakeupFlowManagerThread();
    }

    /* update the engine time representation based on the timestamp
     * of the packet. */
    TimeSet(&p->ts);

    /* call the datalink-specific decoder selected at init time */
    pcap_g.Decoder(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p), pq);

    SCReturnInt(TM_ECODE_OK);
}
/**
 * \brief Decode callback for packets read from a pcap file.
 *
 * Skips injected pseudo packets, updates the per-thread packet counters,
 * periodically wakes the flow manager thread, sets the engine time from
 * the packet timestamp and hands the packet to the datalink-specific
 * decoder before finalizing the decode step.
 *
 * \param tv     thread vars
 * \param p      packet to decode
 * \param data   DecodeThreadVars for this thread
 * \param pq     packet queue the decoder may enqueue new packets on
 * \param postpq unused here
 *
 * \retval TM_ECODE_OK always
 */
TmEcode DecodePcapFile(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, PacketQueue *postpq)
{
    SCEnter();
    DecodeThreadVars *dtv = (DecodeThreadVars *)data;

    /* XXX HACK: flow timeout can call us for injected pseudo packets
     * see bug: https://redmine.openinfosecfoundation.org/issues/1107 */
    if (p->flags & PKT_PSEUDO_STREAM_END)
        return TM_ECODE_OK;

    /* update counters */
    DecodeUpdatePacketCounters(tv, dtv, p);

    /* When reading a file the packet clock can run much faster than wall
     * clock, so the flow manager is woken explicitly when the packet time
     * jumps backwards or advances by more than a minute.
     *
     * Bug fix: tv_usec holds MICROseconds (0..999999). The previous
     * divisor of 1000.0 produced a "fractional" part of up to ~1000
     * seconds, which made curr_ts jitter wildly and triggered spurious
     * flow manager wakeups on nearly every packet. */
    double curr_ts = p->ts.tv_sec + p->ts.tv_usec / 1000000.0;
    if (curr_ts < prev_signaled_ts || (curr_ts - prev_signaled_ts) > 60.0) {
        prev_signaled_ts = curr_ts;
        FlowWakeupFlowManagerThread();
    }

    /* update the engine time representation based on the timestamp
     * of the packet. */
    TimeSet(&p->ts);

    /* call the datalink-specific decoder selected at init time */
    pcap_g.Decoder(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p), pq);

#ifdef DEBUG
    /* packets from this module must originate from the wire or from
     * flow-forced-reassembly */
    BUG_ON(p->pkt_src != PKT_SRC_WIRE && p->pkt_src != PKT_SRC_FFR);
#endif

    PacketDecodeFinalize(tv, dtv, p);

    SCReturnInt(TM_ECODE_OK);
}
/**
 * \brief Get a new flow
 *
 * Get a new flow. We're checking memcap first and will try to make room
 * if the memcap is reached.
 *
 * \param tv thread vars
 * \param dtv decode thread vars (for flow log api thread data)
 *
 * \retval f *LOCKED* flow on success, NULL on error.
 */
static Flow *FlowGetNew(ThreadVars *tv, DecodeThreadVars *dtv, const Packet *p)
{
    if (FlowCreateCheck(p) == 0) {
        return NULL;
    }

    /* try the spare queue first: flows in it were recycled before being
     * queued, so one from here is ready to use (but unlocked) */
    Flow *f = FlowDequeue(&flow_spare_q);
    if (f == NULL) {
        if (FLOW_CHECK_MEMCAP(sizeof(Flow) + FlowStorageSize())) {
            /* still room under the memcap: allocate a fresh flow;
             * it comes back initialized but *unlocked* */
            f = FlowAlloc();
        } else {
            /* memcap reached: declare a state of emergency (once) so the
             * timeout handling becomes more aggressive, then try to force
             * out a used flow. Under high load, waking up the flow mgr
             * each time leads to high cpu usage; flows are not timed out
             * much faster if we check a 1000 times a second. */
            if (!(SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)) {
                SC_ATOMIC_OR(flow_flags, FLOW_EMERGENCY);
                FlowTimeoutsEmergency();
                FlowWakeupFlowManagerThread();
            }
            /* freed flow comes back *unlocked* */
            f = FlowGetUsedFlow(tv, dtv);
        }

        if (f == NULL) {
            /* could not make room (very rare for the emergency path):
             * count the memcap hit and give up */
            if (tv != NULL && dtv != NULL) {
                StatsIncr(tv, dtv->counter_flow_memcap);
            }
            return NULL;
        }
    }

    FLOWLOCK_WRLOCK(f);
    FlowUpdateCounter(tv, dtv, p->proto);
    return f;
}