/**
 * \brief ERF file reading loop.
 */
TmEcode ReceiveErfFileLoop(ThreadVars *tv, void *data, void *slot)
{
    Packet *p = NULL;
    uint16_t packet_q_len = 0;
    ErfFileThreadVars *etv = (ErfFileThreadVars *)data;

    etv->slot = ((TmSlot *)slot)->slot_next;

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* Make sure we have at least one packet in the packet pool,
         * to prevent us from alloc'ing packets at line rate. */
        do {
#ifdef __tile__
            packet_q_len = PacketPoolSize(0);
#else
            packet_q_len = PacketPoolSize();
#endif
            if (unlikely(packet_q_len == 0)) {
#ifdef __tile__
                PacketPoolWait(0);
#else
                PacketPoolWait();
#endif
            }
        } while (packet_q_len == 0);

#ifdef __tile__
        p = PacketGetFromQueueOrAlloc(0);
#else
        p = PacketGetFromQueueOrAlloc();
#endif
        if (unlikely(p == NULL)) {
            SCLogError(SC_ERR_MEM_ALLOC, "Failed to allocate a packet.");
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }
        PKT_SET_SRC(p, PKT_SRC_WIRE);

        if (ReadErfRecord(tv, p, data) != TM_ECODE_OK) {
            TmqhOutputPacketpool(etv->tv, p);
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }

        if (TmThreadsSlotProcessPkt(etv->tv, etv->slot, p) != TM_ECODE_OK) {
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }
    }
    SCReturnInt(TM_ECODE_FAILED);
}
/**
 * \brief Main PCAP reading Loop function
 */
TmEcode ReceivePcapLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    int packet_q_len = 64;
    PcapThreadVars *ptv = (PcapThreadVars *)data;
    int r;
    TmSlot *s = (TmSlot *)slot;

    ptv->slot = s->slot_next;
    ptv->cb_result = TM_ECODE_OK;

    while (1) {
        if (suricata_ctl_flags & SURICATA_STOP) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        PacketPoolWait();

        /* Dispatch a batch of up to packet_q_len packets; the callback is
         * invoked once per packet. */
        r = pcap_dispatch(ptv->pcap_handle, packet_q_len,
                (pcap_handler)PcapCallbackLoop, (u_char *)ptv);
        if (unlikely(r < 0)) {
            int dbreak = 0;
            SCLogError(SC_ERR_PCAP_DISPATCH, "error code %" PRId32 " %s",
                    r, pcap_geterr(ptv->pcap_handle));
#ifdef PCAP_ERROR_BREAK
            if (r == PCAP_ERROR_BREAK) {
                SCReturnInt(ptv->cb_result);
            }
#endif
            do {
                usleep(PCAP_RECONNECT_TIMEOUT);
                if (suricata_ctl_flags != 0) {
                    dbreak = 1;
                    break;
                }
                r = PcapTryReopen(ptv);
            } while (r < 0);
            if (dbreak) {
                break;
            }
        } else if (ptv->cb_result == TM_ECODE_FAILED) {
            SCLogError(SC_ERR_PCAP_DISPATCH, "Pcap callback PcapCallbackLoop failed");
            SCReturnInt(TM_ECODE_FAILED);
        } else if (unlikely(r == 0)) {
            TmThreadsCaptureInjectPacket(tv, ptv->slot, NULL);
        }

        StatsSyncCountersIfSignalled(tv);
    }

    PcapDumpCounters(ptv);
    StatsSyncCountersIfSignalled(tv);
    SCReturnInt(TM_ECODE_OK);
}
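/* A minimal standalone sketch of the pcap_dispatch() return-code handling the
 * loop above relies on, outside Suricata. The file name "sample.pcap" and the
 * batch size 64 are illustrative assumptions; the libpcap calls themselves are
 * the real API. Compile with: cc demo.c -lpcap */
#include <pcap/pcap.h>
#include <stdio.h>

static void on_packet(u_char *user, const struct pcap_pkthdr *h,
        const u_char *bytes)
{
    (void)user; (void)bytes;
    printf("packet: caplen=%u len=%u\n", h->caplen, h->len);
}

int main(void)
{
    char errbuf[PCAP_ERRBUF_SIZE];
    pcap_t *ph = pcap_open_offline("sample.pcap", errbuf);
    if (ph == NULL) {
        fprintf(stderr, "open failed: %s\n", errbuf);
        return 1;
    }
    for (;;) {
        /* >0: packets processed, 0: EOF (savefile) or timeout (live capture),
         * -1: error, -2: loop broken via pcap_breakloop() */
        int r = pcap_dispatch(ph, 64, on_packet, NULL);
        if (r == 0) {
            break;                                      /* end of file */
        }
        if (r < 0) {
            fprintf(stderr, "%s\n", pcap_geterr(ph));
            break;
        }
    }
    pcap_close(ph);
    return 0;
}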
/**
 * \brief Main PCAP file reading Loop function
 */
TmEcode ReceivePcapFileLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    uint16_t packet_q_len = 0;
    PcapFileThreadVars *ptv = (PcapFileThreadVars *)data;
    int r;
    TmSlot *s = (TmSlot *)slot;

    ptv->slot = s->slot_next;
    ptv->cb_result = TM_ECODE_OK;

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        /* Dispatch at most as many packets as there are free slots
         * in the packet pool. */
        r = pcap_dispatch(pcap_g.pcap_handle, (int)packet_q_len,
                (pcap_handler)PcapFileCallbackLoop, (u_char *)ptv);
        if (unlikely(r == -1)) {
            SCLogError(SC_ERR_PCAP_DISPATCH, "error code %" PRId32 " %s",
                    r, pcap_geterr(pcap_g.pcap_handle));
            /* in the error state we just kill the engine */
            EngineKill();
            SCReturnInt(TM_ECODE_FAILED);
        } else if (unlikely(r == 0)) {
            SCLogInfo("pcap file end of file reached (pcap err code %" PRId32 ")", r);
            EngineStop();
            break;
        } else if (ptv->cb_result == TM_ECODE_FAILED) {
            SCLogError(SC_ERR_PCAP_DISPATCH, "Pcap callback PcapFileCallbackLoop failed");
            EngineKill();
            SCReturnInt(TM_ECODE_FAILED);
        }

        SCPerfSyncCountersIfSignalled(tv, 0);
    }

    SCReturnInt(TM_ECODE_OK);
}
static inline Packet *FlowForceReassemblyPseudoPacketGet(int direction,
        Flow *f, TcpSession *ssn, int dummy)
{
    PacketPoolWait();
    Packet *p = PacketPoolGetPacket();
    if (p == NULL) {
        return NULL;
    }

    PACKET_PROFILING_START(p);

    return FlowForceReassemblyPseudoPacketSetup(p, direction, f, ssn, dummy);
}
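/* A standalone sketch of the wait-then-get pool pattern used above, built on
 * a hypothetical Pool/Obj pair with a pthread mutex and condition variable.
 * Suricata's real pool is per-thread and lock-free on the fast path, so this
 * is only an illustration of the blocking behaviour, not the implementation. */
#include <pthread.h>
#include <stddef.h>

typedef struct Obj { struct Obj *next; } Obj;

typedef struct Pool {
    Obj *head;
    pthread_mutex_t m;
    pthread_cond_t c;
} Pool;

/* Block until the pool is non-empty, then pop one object. */
static Obj *PoolWaitAndGet(Pool *pool)
{
    pthread_mutex_lock(&pool->m);
    while (pool->head == NULL)
        pthread_cond_wait(&pool->c, &pool->m);   /* wait for a return */
    Obj *o = pool->head;
    pool->head = o->next;
    pthread_mutex_unlock(&pool->m);
    return o;
}

/* Return an object to the pool and wake one waiter. */
static void PoolPut(Pool *pool, Obj *o)
{
    pthread_mutex_lock(&pool->m);
    o->next = pool->head;
    pool->head = o;
    pthread_cond_signal(&pool->c);
    pthread_mutex_unlock(&pool->m);
}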
/**
 * \brief Main PCAP file reading Loop function
 */
TmEcode PcapFileDispatch(PcapFileFileVars *ptv)
{
    SCEnter();

    int packet_q_len = 64;
    int r;
    TmEcode loop_result = TM_ECODE_OK;
    strlcpy(pcap_filename, ptv->filename, sizeof(pcap_filename));

    while (loop_result == TM_ECODE_OK) {
        if (suricata_ctl_flags & SURICATA_STOP) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        PacketPoolWait();

        /* Dispatch a batch of up to packet_q_len packets. */
        r = pcap_dispatch(ptv->pcap_handle, packet_q_len,
                (pcap_handler)PcapFileCallbackLoop, (u_char *)ptv);
        if (unlikely(r == -1)) {
            SCLogError(SC_ERR_PCAP_DISPATCH, "error code %" PRId32 " %s for %s",
                    r, pcap_geterr(ptv->pcap_handle), ptv->filename);
            if (ptv->shared->cb_result == TM_ECODE_FAILED) {
                SCReturnInt(TM_ECODE_FAILED);
            }
            loop_result = TM_ECODE_DONE;
        } else if (unlikely(r == 0)) {
            SCLogInfo("pcap file %s end of file reached (pcap err code %" PRId32 ")",
                    ptv->filename, r);
            ptv->shared->files++;
            loop_result = TM_ECODE_DONE;
        } else if (ptv->shared->cb_result == TM_ECODE_FAILED) {
            SCLogError(SC_ERR_PCAP_DISPATCH,
                    "Pcap callback PcapFileCallbackLoop failed for %s",
                    ptv->filename);
            loop_result = TM_ECODE_FAILED;
        }
        StatsSyncCountersIfSignalled(ptv->shared->tv);
    }

    SCReturnInt(loop_result);
}
/** \brief Wait until we have the requested amount of packets in the pool
 *
 * In some cases waiting for packets is undesirable. Especially when
 * a wait would happen under a lock of some kind, other parts of the
 * engine could have to wait.
 *
 * This function only returns when at least N packets are in our pool.
 *
 * \param n number of packets needed
 */
void PacketPoolWaitForN(int n)
{
    PktPool *my_pool = GetThreadPacketPool();
    Packet *p = NULL;

    while (1) {
        int i = 0;
        PacketPoolWait();

        /* count packets in our stack */
        p = my_pool->head;
        while (p != NULL) {
            if (++i == n)
                return;
            p = p->next;
        }

        /* continue counting in the return stack */
        if (my_pool->return_stack.head != NULL) {
            SCMutexLock(&my_pool->return_stack.mutex);
            p = my_pool->return_stack.head;
            while (p != NULL) {
                if (++i == n) {
                    SCMutexUnlock(&my_pool->return_stack.mutex);
                    return;
                }
                p = p->next;
            }
            SCMutexUnlock(&my_pool->return_stack.mutex);

        /* or signal that we need packets and wait */
        } else {
            SCMutexLock(&my_pool->return_stack.mutex);
            SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
            SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
            SCMutexUnlock(&my_pool->return_stack.mutex);
        }
    }
}
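/* A hedged usage sketch for PacketPoolWaitForN(): a caller that must inject
 * one pseudo packet per flow direction reserves two pool packets up front, so
 * neither get can stall while it holds a lock. Because the pool is per-thread
 * and only this thread consumes it, the counted packets stay available.
 * InjectPseudoPacket() is a hypothetical helper, not a Suricata function. */
void FlushBothDirections(Flow *f)
{
    PacketPoolWaitForN(2);               /* block until 2 packets are free */

    Packet *p1 = PacketPoolGetPacket();  /* will not wait after the call above */
    Packet *p2 = PacketPoolGetPacket();

    InjectPseudoPacket(f, p1, 0);        /* hypothetical: to-server direction */
    InjectPseudoPacket(f, p2, 1);        /* hypothetical: to-client direction */
}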
/**
 * \brief NFQ receive module main entry function: receive a packet from NFQ
 */
TmEcode ReceiveNFQ(ThreadVars *tv, Packet *p, void *data,
        PacketQueue *pq, PacketQueue *postpq)
{
    NFQThreadVars *ntv = (NFQThreadVars *)data;
    NFQQueueVars *nq = NFQGetQueue(ntv->nfq_index);
    if (nq == NULL) {
        SCLogWarning(SC_ERR_INVALID_ARGUMENT,
                "can't get queue for %" PRId16 "", ntv->nfq_index);
        return TM_ECODE_FAILED;
    }

    /* make sure we have at least one packet in the packet pool, to prevent
     * us from 1) alloc'ing packets at line rate, 2) having a race condition
     * for the nfq mutex lock with the verdict thread. */
    while (PacketPoolSize() == 0) {
        PacketPoolWait();
    }

    /* do our nfq magic */
    NFQRecvPkt(nq, ntv);

    return TM_ECODE_OK;
}
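/* A minimal standalone libnetfilter_queue receive loop, as a sketch of the
 * kind of work NFQRecvPkt() wraps. Queue number 0 and the unconditional
 * NF_ACCEPT verdict are illustrative assumptions; a real IPS issues a verdict
 * per packet. Compile with: cc demo.c -lnetfilter_queue */
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdint.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
        struct nfq_data *nfa, void *data)
{
    struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
    uint32_t id = ph ? ntohl(ph->packet_id) : 0;
    /* accept everything in this sketch */
    return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
    struct nfq_handle *h = nfq_open();
    struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
    nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

    char buf[4096];
    int fd = nfq_fd(h);
    for (;;) {
        int rv = recv(fd, buf, sizeof(buf), 0);
        if (rv < 0)
            break;
        nfq_handle_packet(h, buf, rv);   /* dispatches to cb() */
    }
    nfq_destroy_queue(qh);
    nfq_close(h);
    return 0;
}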
/**
 * \brief Process a chunk of records read from a DAG interface.
 *
 * This function takes a pointer to a buffer read from the DAG interface
 * and processes its individual records.
 */
static inline TmEcode ProcessErfDagRecords(ErfDagThreadVars *ewtn, uint8_t *top,
        uint32_t *pkts_read)
{
    SCEnter();

    int err = 0;
    dag_record_t *dr = NULL;
    char *prec = NULL;
    int rlen;
    char hdr_type = 0;
    int processed = 0;
    int packet_q_len = 0;

    *pkts_read = 0;

    while (((top - ewtn->btm) >= dag_record_size) &&
            ((processed + dag_record_size) < 4 * 1024 * 1024)) {

        /* Make sure we have at least one packet in the packet pool,
         * to prevent us from alloc'ing packets at line rate. */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        prec = (char *)ewtn->btm;
        dr = (dag_record_t *)prec;
        rlen = ntohs(dr->rlen);
        hdr_type = dr->type;

        /* If we don't have enough data to finish processing this ERF record
         * return and maybe next time we will. */
        if ((top - ewtn->btm) < rlen)
            SCReturnInt(TM_ECODE_OK);

        ewtn->btm += rlen;
        processed += rlen;

        /* Only support ethernet at this time. */
        switch (hdr_type & 0x7f) {
            case TYPE_PAD:
                /* Skip. */
                continue;
            case TYPE_DSM_COLOR_ETH:
            case TYPE_COLOR_ETH:
            case TYPE_COLOR_HASH_ETH:
                /* In these types the color value overwrites the lctr
                 * (drop count). */
                break;
            case TYPE_ETH:
                if (dr->lctr) {
                    SCPerfCounterIncr(ewtn->drops, ewtn->tv->sc_perf_pca);
                }
                break;
            default:
                SCLogError(SC_ERR_UNIMPLEMENTED,
                        "Processing of DAG record type: %d not implemented.",
                        dr->type);
                SCReturnInt(TM_ECODE_FAILED);
        }

        err = ProcessErfDagRecord(ewtn, prec);
        if (err != TM_ECODE_OK) {
            SCReturnInt(TM_ECODE_FAILED);
        }

        (*pkts_read)++;
    }

    SCReturnInt(TM_ECODE_OK);
}
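/* A standalone sketch of the length-prefixed record walk above, using a
 * hypothetical record layout (2-byte big-endian total length, then payload).
 * ERF records follow the same scheme: dr->rlen gives the full record size, so
 * the cursor always advances by rlen regardless of the record type. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void WalkRecords(const uint8_t *buf, size_t len)
{
    const uint8_t *cur = buf;
    const uint8_t *end = buf + len;

    while ((size_t)(end - cur) >= 2) {        /* need at least a length field */
        uint16_t rlen;
        memcpy(&rlen, cur, 2);
        rlen = ntohs(rlen);

        if (rlen < 2 || (size_t)(end - cur) < rlen)
            break;                            /* truncated record: stop here */

        printf("record of %u bytes\n", rlen);
        cur += rlen;                          /* advance by the full record */
    }
}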
/**
 * \brief Receives packets from an interface via libpfring.
 *
 * This function receives packets from an interface and passes
 * the packet on to the pfring callback function.
 *
 * \param tv pointer to ThreadVars
 * \param data pointer that gets cast into PfringThreadVars for ptv
 * \param slot slot containing task information
 * \retval TM_ECODE_OK on success
 * \retval TM_ECODE_FAILED on failure
 */
TmEcode ReceivePfringLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    uint16_t packet_q_len = 0;
    PfringThreadVars *ptv = (PfringThreadVars *)data;
    Packet *p = NULL;
    struct pfring_pkthdr hdr;
    TmSlot *s = (TmSlot *)slot;
    time_t last_dump = 0;
    struct timeval current_time;

    ptv->slot = s->slot_next;

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        p = PacketGetFromQueueOrAlloc();
        if (p == NULL) {
            SCReturnInt(TM_ECODE_FAILED);
        }
        PKT_SET_SRC(p, PKT_SRC_WIRE);

        /* Some flavours of PF_RING may fail to set timestamp - see
         * PF-RING-enabled libpcap code */
        hdr.ts.tv_sec = hdr.ts.tv_usec = 0;

        /* Depending on what compile time options are used for pfring we either
         * return 0 or -1 on error and always 1 for success */
#ifdef HAVE_PFRING_RECV_UCHAR
        u_char *pkt_buffer = GET_PKT_DIRECT_DATA(p);
        u_int buffer_size = GET_PKT_DIRECT_MAX_SIZE(p);
        int r = pfring_recv(ptv->pd, &pkt_buffer,
                buffer_size,
                &hdr,
                LIBPFRING_WAIT_FOR_INCOMING);

        /* Check for Zero-copy if buffer size is zero */
        if (buffer_size == 0) {
            PacketSetData(p, pkt_buffer, hdr.caplen);
        }
#else
        int r = pfring_recv(ptv->pd, (char *)GET_PKT_DIRECT_DATA(p),
                (u_int)GET_PKT_DIRECT_MAX_SIZE(p),
                &hdr,
                LIBPFRING_WAIT_FOR_INCOMING);
#endif /* HAVE_PFRING_RECV_UCHAR */

        if (r == 1) {
            //printf("ReceivePfring src %" PRIu32 " sport %" PRIu32 " dst %" PRIu32 " dstport %" PRIu32 "\n",
            //        hdr.parsed_pkt.ipv4_src, hdr.parsed_pkt.l4_src_port, hdr.parsed_pkt.ipv4_dst, hdr.parsed_pkt.l4_dst_port);
            PfringProcessPacket(ptv, &hdr, p);

            if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK) {
                TmqhOutputPacketpool(ptv->tv, p);
                SCReturnInt(TM_ECODE_FAILED);
            }

            /* Trigger one dump of stats every second */
            TimeGet(&current_time);
            if (current_time.tv_sec != last_dump) {
                PfringDumpCounters(ptv);
                last_dump = current_time.tv_sec;
            }
        } else {
            SCLogError(SC_ERR_PF_RING_RECV, "pfring_recv error %" PRId32 "", r);
            TmqhOutputPacketpool(ptv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }
        SCPerfSyncCountersIfSignalled(tv);
    }

    return TM_ECODE_OK;
}
/**
 * \brief Receives packets from an interface via libpfring.
 *
 * This function receives packets from an interface and passes
 * the packet on to the pfring callback function.
 *
 * \param tv pointer to ThreadVars
 * \param data pointer that gets cast into PfringThreadVars for ptv
 * \param slot slot containing task information
 * \retval TM_ECODE_OK on success
 * \retval TM_ECODE_FAILED on failure
 */
TmEcode ReceivePfringLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    PfringThreadVars *ptv = (PfringThreadVars *)data;
    Packet *p = NULL;
    struct pfring_pkthdr hdr;
    TmSlot *s = (TmSlot *)slot;
    time_t last_dump = 0;
    u_int buffer_size;
    u_char *pkt_buffer;

    ptv->slot = s->slot_next;

    /* we have to enable the ring here as we need to do it after all
     * the threads have called pfring_set_cluster(). */
    int rc = pfring_enable_ring(ptv->pd);
    if (rc != 0) {
        SCLogError(SC_ERR_PF_RING_OPEN, "pfring_enable_ring failed returned %d ", rc);
        SCReturnInt(TM_ECODE_FAILED);
    }

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        PacketPoolWait();

        p = PacketGetFromQueueOrAlloc();
        if (p == NULL) {
            SCReturnInt(TM_ECODE_FAILED);
        }
        PKT_SET_SRC(p, PKT_SRC_WIRE);

        /* Some flavours of PF_RING may fail to set timestamp - see
         * PF-RING-enabled libpcap code */
        hdr.ts.tv_sec = hdr.ts.tv_usec = 0;

        /* Check for Zero-copy mode */
        if (ptv->flags & PFRING_FLAGS_ZERO_COPY) {
            buffer_size = 0;
            pkt_buffer = NULL;
        } else {
            buffer_size = GET_PKT_DIRECT_MAX_SIZE(p);
            pkt_buffer = GET_PKT_DIRECT_DATA(p);
        }

        int r = pfring_recv(ptv->pd, &pkt_buffer,
                buffer_size,
                &hdr,
                LIBPFRING_WAIT_FOR_INCOMING);

        if (likely(r == 1)) {
            /* profiling started before blocking pfring_recv call, so
             * reset it here */
            PACKET_PROFILING_RESTART(p);

            /* Check for Zero-copy mode */
            if (ptv->flags & PFRING_FLAGS_ZERO_COPY) {
                PacketSetData(p, pkt_buffer, hdr.caplen);
            }

            //printf("ReceivePfring src %" PRIu32 " sport %" PRIu32 " dst %" PRIu32 " dstport %" PRIu32 "\n",
            //        hdr.parsed_pkt.ipv4_src, hdr.parsed_pkt.l4_src_port, hdr.parsed_pkt.ipv4_dst, hdr.parsed_pkt.l4_dst_port);
            PfringProcessPacket(ptv, &hdr, p);

            if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK) {
                TmqhOutputPacketpool(ptv->tv, p);
                SCReturnInt(TM_ECODE_FAILED);
            }

            /* Trigger one dump of stats every second */
            if (p->ts.tv_sec != last_dump) {
                PfringDumpCounters(ptv);
                last_dump = p->ts.tv_sec;
            }
        } else if (unlikely(r == 0)) {
            if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
                SCReturnInt(TM_ECODE_OK);
            }

            /* pfring didn't use the packet yet */
            TmThreadsCaptureInjectPacket(tv, ptv->slot, p);
        } else {
            SCLogError(SC_ERR_PF_RING_RECV, "pfring_recv error %" PRId32 "", r);
            TmqhOutputPacketpool(ptv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }
        StatsSyncCountersIfSignalled(tv);
    }

    return TM_ECODE_OK;
}
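/* A standalone sketch of the zero-copy convention used above, assuming
 * libpfring headers and an interface named "eth0". Passing a NULL buffer
 * pointer with buffer size 0 asks pfring_recv() to hand back a pointer into
 * the ring itself instead of copying into a caller-supplied buffer; the
 * pointer is only valid until the next recv. */
#include <pfring.h>
#include <stdio.h>

int main(void)
{
    pfring *pd = pfring_open("eth0", 1518 /* caplen */, PF_RING_PROMISC);
    if (pd == NULL)
        return 1;
    pfring_enable_ring(pd);

    u_char *pkt = NULL;                  /* NULL + size 0 => zero-copy */
    struct pfring_pkthdr hdr;
    int r = pfring_recv(pd, &pkt, 0, &hdr, 1 /* wait for packet */);
    if (r == 1)
        printf("got %u bytes at %p (valid until the next recv)\n",
                hdr.caplen, (void *)pkt);

    pfring_close(pd);
    return 0;
}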
TmEcode ReceiveIPFWLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    IPFWThreadVars *ptv = (IPFWThreadVars *)data;
    IPFWQueueVars *nq = NULL;
    uint8_t pkt[IP_MAXPACKET];
    int pktlen = 0;
    struct pollfd IPFWpoll;
    struct timeval IPFWts;
    Packet *p = NULL;
    uint16_t packet_q_len = 0;

    nq = IPFWGetQueue(ptv->ipfw_index);
    if (nq == NULL) {
        SCLogWarning(SC_ERR_INVALID_ARGUMENT, "Can't get thread variable");
        SCReturnInt(TM_ECODE_FAILED);
    }

    SCLogInfo("Thread '%s' will run on port %d (item %d)",
            tv->name, nq->port_num, ptv->ipfw_index);

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        IPFWpoll.fd = nq->fd;
        IPFWpoll.events = POLLRDNORM;

        /* Poll the socket for status */
        if ((poll(&IPFWpoll, 1, IPFW_SOCKET_POLL_MSEC)) > 0) {
            if (!(IPFWpoll.revents & (POLLRDNORM | POLLERR)))
                continue;
        }

        if ((pktlen = recvfrom(nq->fd, pkt, sizeof(pkt), 0,
                        (struct sockaddr *)&nq->ipfw_sin,
                        &nq->ipfw_sinlen)) == -1) {
            /* We received an error on socket read */
            if (errno == EINTR || errno == EWOULDBLOCK) {
                /* Nothing for us to process */
                continue;
            } else {
                SCLogWarning(SC_WARN_IPFW_RECV,
                        "Read from IPFW divert socket failed: %s",
                        strerror(errno));
                SCReturnInt(TM_ECODE_FAILED);
            }
        }

        /* We have a packet to process */
        memset(&IPFWts, 0, sizeof(struct timeval));
        gettimeofday(&IPFWts, NULL);

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        p = PacketGetFromQueueOrAlloc();
        if (p == NULL) {
            SCReturnInt(TM_ECODE_FAILED);
        }
        PKT_SET_SRC(p, PKT_SRC_WIRE);

        SCLogDebug("Received Packet Len: %d", pktlen);

        p->ts.tv_sec = IPFWts.tv_sec;
        p->ts.tv_usec = IPFWts.tv_usec;

        ptv->pkts++;
        ptv->bytes += pktlen;

        p->datalink = ptv->datalink;
        p->ipfw_v.ipfw_index = ptv->ipfw_index;

        PacketCopyData(p, pkt, pktlen);
        SCLogDebug("Packet info: pkt_len: %" PRIu32 " (pkt %02x, pkt_data %02x)",
                GET_PKT_LEN(p), *pkt, GET_PKT_DATA(p));

        if (TmThreadsSlotProcessPkt(tv, ((TmSlot *)slot)->slot_next, p)
                != TM_ECODE_OK) {
            TmqhOutputPacketpool(tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }

        SCPerfSyncCountersIfSignalled(tv, 0);
    }

    SCReturnInt(TM_ECODE_OK);
}
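/* A minimal standalone sketch of a FreeBSD divert-socket read/reinject loop,
 * the mechanism the function above wraps. The divert port 8000 is an
 * illustrative assumption and must match an ipfw "divert 8000" rule;
 * IPPROTO_DIVERT is FreeBSD-specific. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);
    if (fd < 0)
        return 1;

    struct sockaddr_in sin;
    memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    sin.sin_port = htons(8000);          /* divert port, not a TCP/UDP port */
    if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
        return 1;

    unsigned char pkt[65535];
    for (;;) {
        struct sockaddr_in from;
        socklen_t fromlen = sizeof(from);
        ssize_t n = recvfrom(fd, pkt, sizeof(pkt), 0,
                (struct sockaddr *)&from, &fromlen);
        if (n < 0)
            break;
        /* inspect pkt[0..n) here; reinjecting to the same address resumes
         * the packet at the rule it was diverted from */
        sendto(fd, pkt, n, 0, (struct sockaddr *)&from, fromlen);
    }
    close(fd);
    return 0;
}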
/**
 * \brief Main Napatech reading Loop function
 */
TmEcode NapatechStreamLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    int32_t status;
    char errbuf[100];
    uint16_t packet_q_len = 0;
    uint64_t pkt_ts;
    NtNetBuf_t packet_buffer;
    NapatechThreadVars *ntv = (NapatechThreadVars *)data;
    NtNetRx_t stat_cmd;

    SCLogInfo("Opening NAPATECH Stream: %lu for processing", ntv->stream_id);

    if ((status = NT_NetRxOpen(&(ntv->rx_stream), "SuricataStream",
            NT_NET_INTERFACE_PACKET, ntv->stream_id, ntv->hba)) != NT_SUCCESS) {
        NT_ExplainError(status, errbuf, sizeof(errbuf));
        SCLogError(SC_ERR_NAPATECH_OPEN_FAILED,
                "Failed to open NAPATECH Stream: %lu - %s", ntv->stream_id, errbuf);
        SCFree(ntv);
        SCReturnInt(TM_ECODE_FAILED);
    }

    stat_cmd.cmd = NT_NETRX_READ_CMD_STREAM_DROP;

    SCLogInfo("Napatech Packet Stream Loop Started for Stream ID: %lu", ntv->stream_id);

    TmSlot *s = (TmSlot *)slot;
    ntv->slot = s->slot_next;

    while (!(suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL))) {
        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        /*
         * Napatech returns packets 1 at a time
         */
        status = NT_NetRxGet(ntv->rx_stream, &packet_buffer, 1000);
        if (unlikely(status == NT_STATUS_TIMEOUT || status == NT_STATUS_TRYAGAIN)) {
            /*
             * no frames currently available
             */
            continue;
        } else if (unlikely(status != NT_SUCCESS)) {
            SCLogError(SC_ERR_NAPATECH_STREAM_NEXT_FAILED,
                    "Failed to read from Napatech Stream: %lu", ntv->stream_id);
            SCReturnInt(TM_ECODE_FAILED);
        }

        Packet *p = PacketGetFromQueueOrAlloc();
        if (unlikely(p == NULL)) {
            NT_NetRxRelease(ntv->rx_stream, packet_buffer);
            SCReturnInt(TM_ECODE_FAILED);
        }

        pkt_ts = NT_NET_GET_PKT_TIMESTAMP(packet_buffer);

        /*
         * Handle the different timestamp forms that the Napatech cards could use.
         * - NT_TIMESTAMP_TYPE_NATIVE is not supported due to having a base of 0
         *   as opposed to NATIVE_UNIX which has a base of 1/1/1970
         */
        switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) {
            case NT_TIMESTAMP_TYPE_NATIVE_UNIX:
                /* 10 ns units since 1/1/1970; round the sub-microsecond rest */
                p->ts.tv_sec = pkt_ts / 100000000;
                p->ts.tv_usec = ((pkt_ts % 100000000) / 100) +
                        ((pkt_ts % 100) > 50 ? 1 : 0);
                break;
            case NT_TIMESTAMP_TYPE_PCAP:
                p->ts.tv_sec = pkt_ts >> 32;
                p->ts.tv_usec = pkt_ts & 0xFFFFFFFF;
                break;
            case NT_TIMESTAMP_TYPE_PCAP_NANOTIME:
                p->ts.tv_sec = pkt_ts >> 32;
                p->ts.tv_usec = ((pkt_ts & 0xFFFFFFFF) / 1000) +
                        ((pkt_ts % 1000) > 500 ? 1 : 0);
                break;
            case NT_TIMESTAMP_TYPE_NATIVE_NDIS:
                /* number of seconds between 1/1/1601 and 1/1/1970 */
                p->ts.tv_sec = (pkt_ts / 100000000) - 11644473600;
                p->ts.tv_usec = ((pkt_ts % 100000000) / 100) +
                        ((pkt_ts % 100) > 50 ? 1 : 0);
                break;
            default:
                SCLogError(SC_ERR_NAPATECH_TIMESTAMP_TYPE_NOT_SUPPORTED,
                        "Packet from Napatech Stream: %lu does not have a supported timestamp format",
                        ntv->stream_id);
                NT_NetRxRelease(ntv->rx_stream, packet_buffer);
                SCReturnInt(TM_ECODE_FAILED);
        }

        SCLogDebug("p->ts.tv_sec %"PRIuMAX"", (uintmax_t)p->ts.tv_sec);
        p->datalink = LINKTYPE_ETHERNET;

        ntv->pkts++;
        ntv->bytes += NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer);

        // Update drop counter
        if (unlikely((status = NT_NetRxRead(ntv->rx_stream, &stat_cmd)) != NT_SUCCESS)) {
            NT_ExplainError(status, errbuf, sizeof(errbuf));
            SCLogWarning(SC_ERR_NAPATECH_STAT_DROPS_FAILED,
                    "Couldn't retrieve drop statistics from the RX stream: %lu - %s",
                    ntv->stream_id, errbuf);
        } else {
            ntv->drops += stat_cmd.u.streamDrop.pktsDropped;
        }

        if (unlikely(PacketCopyData(p,
                (uint8_t *)NT_NET_GET_PKT_L2_PTR(packet_buffer),
                NT_NET_GET_PKT_WIRE_LENGTH(packet_buffer)))) {
            TmqhOutputPacketpool(ntv->tv, p);
            NT_NetRxRelease(ntv->rx_stream, packet_buffer);
            SCReturnInt(TM_ECODE_FAILED);
        }

        if (unlikely(TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK)) {
            TmqhOutputPacketpool(ntv->tv, p);
            NT_NetRxRelease(ntv->rx_stream, packet_buffer);
            SCReturnInt(TM_ECODE_FAILED);
        }

        NT_NetRxRelease(ntv->rx_stream, packet_buffer);
        SCPerfSyncCountersIfSignalled(tv);
    }

    SCReturnInt(TM_ECODE_OK);
}
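/* A standalone sketch of the two packed-PCAP timestamp layouts handled above:
 * seconds in the high 32 bits, with either microseconds or nanoseconds in the
 * low 32 bits. Pure arithmetic, so it runs anywhere; the sample value in
 * main() is an illustrative assumption. */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* 64-bit "pcap" timestamp: high 32 = seconds, low 32 = microseconds */
static struct timeval FromPcapTs(uint64_t ts)
{
    struct timeval tv;
    tv.tv_sec = ts >> 32;
    tv.tv_usec = ts & 0xFFFFFFFF;
    return tv;
}

/* Same layout, but the low 32 bits carry nanoseconds; round to microseconds */
static struct timeval FromPcapNanoTs(uint64_t ts)
{
    struct timeval tv;
    uint32_t ns = ts & 0xFFFFFFFF;
    tv.tv_sec = ts >> 32;
    tv.tv_usec = ns / 1000 + ((ns % 1000) > 500 ? 1 : 0);
    return tv;
}

int main(void)
{
    uint64_t ts = ((uint64_t)1700000000 << 32) | 123456;  /* sample value */
    struct timeval tv = FromPcapTs(ts);
    printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
    return 0;
}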
/**
 * \brief Thread entry function for reading ERF records from a DAG card.
 *
 * Reads new ERF records from the DAG input buffer and copies them to
 * an internal Suricata packet buffer -- similar to the way the
 * pcap packet handler works.
 *
 * We create new packet structures using PacketGetFromQueueOrAlloc
 * for each packet between the top and btm pointers except for
 * the first packet, for which a Packet buffer is provided
 * from the packetpool.
 *
 * We always read up to dag_max_read_packets ERF packets from the
 * DAG buffer, but we might read less. This differs from the
 * ReceivePcap handler -- it will only read pkts up to a maximum
 * of either the packetpool count or the pcap_max_read_packets.
 *
 * \param tv pointer to ThreadVars
 * \param p data pointer
 * \param data
 * \param pq pointer to the PacketQueue (not used here)
 * \param postpq
 * \retval TM_ECODE_FAILED on failure and TM_ECODE_OK on success.
 * \note We also use the packetpool hack first used in the source-pcap
 *       handler so we don't keep producing packets without any dying.
 *       This implies that if we are in this situation we run the risk
 *       of dropping packets at the interface.
 */
TmEcode ReceiveErfDag(ThreadVars *tv, Packet *p, void *data,
        PacketQueue *pq, PacketQueue *postpq)
{
    SCEnter();

    uint16_t packet_q_len = 0;
    uint32_t diff = 0;
    int err = TM_ECODE_OK;
    uint8_t *top = NULL;
    uint32_t pkts_read = 0;

    assert(p);
    assert(pq);
    assert(postpq);

    ErfDagThreadVars *ewtn = (ErfDagThreadVars *)data;

    /* NOTE/JNM: Hack copied from source-pcap.c
     *
     * Make sure we have at least one packet in the packet pool, to
     * prevent us from alloc'ing packets at line rate */
    while (packet_q_len == 0) {
        packet_q_len = PacketPoolSize();
        if (packet_q_len == 0) {
            PacketPoolWait();
        }
    }

    if (postpq == NULL) {
        ewtn->dag_max_read_packets = 1;
    }

    while (pkts_read == 0) {
        if (suricata_ctl_flags != 0) {
            break;
        }

        /* NOTE/JNM: This might not work well if we start restricting the
         * number of ERF records processed per call to a small number as
         * the overhead required here could exceed the time it takes to
         * process a small number of ERF records.
         *
         * XXX/JNM: Possibly process the DAG stream buffer first if there
         * are ERF packets or else call dag_advance_stream and then process
         * the DAG stream buffer.
         */
        top = dag_advance_stream(ewtn->dagfd, ewtn->dagstream, &(ewtn->btm));

        if (NULL == top) {
            if ((ewtn->dagstream & 0x1) && (errno == EAGAIN)) {
                usleep(10 * 1000);
                ewtn->btm = ewtn->top;
                continue;
            } else {
                SCLogError(SC_ERR_ERF_DAG_STREAM_READ_FAILED,
                        "Failed to read from stream: %d, DAG: %s when using dag_advance_stream",
                        ewtn->dagstream, ewtn->dagname);
                SCReturnInt(TM_ECODE_FAILED);
            }
        }

        diff = top - ewtn->btm;
        if (diff == 0) {
            continue;
        }

        assert(diff >= dag_record_size);

        err = ProcessErfDagRecords(ewtn, p, top, postpq, &pkts_read);

        if (err == TM_ECODE_FAILED) {
            SCLogError(SC_ERR_ERF_DAG_STREAM_READ_FAILED,
                    "Failed to read from stream: %d, DAG: %s",
                    ewtn->dagstream, ewtn->dagname);
            ReceiveErfDagCloseStream(ewtn->dagfd, ewtn->dagstream);
            SCReturnInt(err);
        }
    }

    SCLogDebug("Read %d records from stream: %d, DAG: %s",
            pkts_read, ewtn->dagstream, ewtn->dagname);

    if (suricata_ctl_flags != 0) {
        SCReturnInt(TM_ECODE_FAILED);
    }

    SCReturnInt(err);
}
/**
 * \brief Main Napatech reading Loop function
 */
TmEcode NapatechFeedLoop(ThreadVars *tv, void *data, void *slot)
{
    SCEnter();

    int32_t status;
    int32_t caplen;
    PCAP_HEADER *header;
    uint8_t *frame;
    uint16_t packet_q_len = 0;
    NapatechThreadVars *ntv = (NapatechThreadVars *)data;
    int r;
    TmSlot *s = (TmSlot *)slot;

    ntv->slot = s->slot_next;

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* make sure we have at least one packet in the packet pool, to prevent
         * us from alloc'ing packets at line rate */
        do {
            packet_q_len = PacketPoolSize();
            if (unlikely(packet_q_len == 0)) {
                PacketPoolWait();
            }
        } while (packet_q_len == 0);

        /*
         * Napatech returns frames in segment chunks. napatech_next_frame
         * returns 1 for a frame, 0 if the segment is empty, and -1 on error
         */
        status = napatech_next_frame(ntv->feed, &header, &frame);
        if (status == 0) {
            /*
             * no frames currently available
             */
            continue;
        } else if (status < 0) {
            SCLogError(SC_ERR_NAPATECH_FEED_NEXT_FAILED,
                    "Failed to read from Napatech feed %d:%d",
                    ntv->adapter_number, ntv->feed_number);
            SCReturnInt(TM_ECODE_FAILED);
        }

        /* beware that storeLen is aligned; therefore, it may be larger than
         * wireLen, so capture the smaller of the two */
        caplen = (header->wireLen < header->storeLen) ?
                header->wireLen : header->storeLen;

        Packet *p = PacketGetFromQueueOrAlloc();
        if (unlikely(p == NULL)) {
            SCReturnInt(TM_ECODE_FAILED);
        }

        p->ts.tv_sec = header->ts.tv_sec;
        p->ts.tv_usec = header->ts.tv_usec;
        SCLogDebug("p->ts.tv_sec %"PRIuMAX"", (uintmax_t)p->ts.tv_sec);
        p->datalink = LINKTYPE_ETHERNET;

        ntv->pkts++;
        ntv->bytes += caplen;

        if (unlikely(PacketCopyData(p, frame, caplen))) {
            TmqhOutputPacketpool(ntv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }

        if (TmThreadsSlotProcessPkt(ntv->tv, ntv->slot, p) != TM_ECODE_OK) {
            TmqhOutputPacketpool(ntv->tv, p);
            SCReturnInt(TM_ECODE_FAILED);
        }
    }
    SCReturnInt(TM_ECODE_OK);
}