struct packet *packet_clone(struct packet *src, unsigned int flags) {

	struct packet *dst = NULL;

	if (!(flags & PACKET_FLAG_FORCE_NO_COPY) && !src->pkt_buff) {
		// If the packet doesn't have a pkt_buff structure, it was not allocated by us.
		// Most likely it lives in a capture ring buffer (pcap), so we must copy it.
		dst = packet_alloc();
		if (!dst)
			return NULL;

		// FIXME get the alignment offset from the input
		if (packet_buffer_alloc(dst, src->len, 0) != POM_OK) {
			packet_release(dst);
			return NULL;
		}

		dst->ts = src->ts;

		memcpy(dst->buff, src->buff, src->len);

		dst->datalink = src->datalink;
		dst->input = src->input;

		// Multipart and stream are not copied

		return dst;
	}

	__sync_fetch_and_add(&src->refcount, 1);

	return src;
}
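/*
 * A minimal usage sketch (not part of the original source): packet_clone()
 * either bumps the refcount of a packet we already own, or deep-copies one
 * that lives in a foreign buffer. Either way the caller ends up with its own
 * reference that must be balanced by packet_release(). The queue_for_later()
 * helper below is hypothetical, only there to show the ownership hand-off.
 */
static int keep_packet_for_later(struct packet *pkt) {

	struct packet *mine = packet_clone(pkt, 0);
	if (!mine)
		return POM_ERR;

	if (queue_for_later(mine) != POM_OK) { // Hypothetical consumer
		// We still own the reference, drop it on failure
		packet_release(mine);
		return POM_ERR;
	}

	// The consumer is now responsible for calling packet_release()
	return POM_OK;
}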
static void filter_wrapper(struct thread_state *state, struct packet *pkt) {

	int h;
	LUA_STACK_MARK(state->lua->L);

	packet_addref(pkt);

	lua_pushcfunction(state->lua->L, lua_state_error_formater);
	h = lua_gettop(state->lua->L);

	lua_getglobal(state->lua->L, "haka");
	lua_getfield(state->lua->L, -1, "filter");

	if (!lua_isnil(state->lua->L, -1)) {
		if (!lua_pushppacket(state->lua->L, pkt)) {
			LOG_ERROR(core, "packet internal error");
			packet_drop(pkt);
		}
		else {
			if (lua_pcall(state->lua->L, 1, 0, h)) {
				lua_state_print_error(state->lua->L, "filter");
				packet_drop(pkt);
			}
		}
	}
	else {
		lua_pop(state->lua->L, 1);
		packet_drop(pkt);
	}

	lua_pop(state->lua->L, 2);
	LUA_STACK_CHECK(state->lua->L, 0);

	packet_release(pkt);
}
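/*
 * A self-contained sketch (assumptions: plain Lua 5.2+ C API, none of the
 * Haka types) of the protected-call pattern used above: push a message
 * handler first, record its stack index, then hand that index to lua_pcall()
 * so errors get formatted (here with a traceback) before control returns.
 */
#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>

static int traceback_handler(lua_State *L) {
	// Append a traceback to the error message (luaL_traceback is Lua 5.2+)
	luaL_traceback(L, L, lua_tostring(L, -1), 1);
	return 1;
}

static void call_protected(lua_State *L, const char *global_func) {
	lua_pushcfunction(L, traceback_handler);
	int h = lua_gettop(L);            // Index of the message handler

	lua_getglobal(L, global_func);
	if (lua_pcall(L, 0, 0, h)) {      // On error, the handler's result is on top
		fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
		lua_pop(L, 1);                // Pop the formatted error
	}
	lua_pop(L, 1);                    // Pop the message handler
}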
int packet_multipart_process(struct packet_multipart *multipart, struct proto_process_stack *stack, unsigned int stack_index) {

	struct packet *p = packet_alloc();
	if (!p) {
		packet_multipart_cleanup(multipart);
		return PROTO_ERR;
	}

	if (packet_buffer_alloc(p, multipart->cur, multipart->align_offset)) {
		packet_release(p);
		packet_multipart_cleanup(multipart);
		pom_oom(multipart->cur);
		return PROTO_ERR;
	}

	// Copy each fragment at its offset into the reassembled buffer
	struct packet_multipart_pkt *tmp = multipart->head;
	for (; tmp; tmp = tmp->next) {
		if (tmp->offset + tmp->len > multipart->cur) {
			pomlog(POMLOG_DEBUG "Offset in packet fragment is bigger than packet size.");
			packet_release(p);
			packet_multipart_cleanup(multipart);
			return PROTO_INVALID;
		}
		memcpy(p->buff + tmp->offset, tmp->pkt->buff + tmp->pkt_buff_offset, tmp->len);
	}

	p->ts = multipart->tail->pkt->ts;
	p->multipart = multipart;
	p->len = multipart->cur;
	p->datalink = multipart->proto;
	p->input = multipart->head->pkt->input;

	stack[stack_index].pload = p->buff;
	stack[stack_index].plen = p->len;
	stack[stack_index].proto = p->datalink;

	int res = core_process_multi_packet(stack, stack_index, p);

	packet_release(p);

	return (res == PROTO_ERR ? POM_ERR : POM_OK);
}
int packet_multipart_cleanup(struct packet_multipart *m) {

	if (!m)
		return POM_ERR;

	struct packet_multipart_pkt *tmp;

	while (m->head) {
		tmp = m->head;
		m->head = tmp->next;
		packet_release(tmp->pkt);
		free(tmp);
	}

	free(m);

	return POM_OK;
}
static int input_kismet_drone_read(struct input *i) {

	struct input_kismet_drone_priv *priv = i->priv;

	if (priv->fd == -1)
		return POM_ERR;

	while (1) {
		struct kismet_drone_packet kpkt;
		if (pom_read(priv->fd, &kpkt, sizeof(struct kismet_drone_packet)) != POM_OK)
			return POM_ERR;

		if (ntohl(kpkt.sentinel) != KISMET_DRONE_SENTINEL) {
			pomlog(POMLOG_ERR "Invalid sentinel value : 0x%X, expected 0x%X", ntohl(kpkt.sentinel), KISMET_DRONE_SENTINEL);
			return POM_ERR;
		}

		enum kismet_drone_cmd cmdnum = ntohl(kpkt.drone_cmdnum);
		uint32_t data_len = ntohl(kpkt.data_len);

		debug_kismet("CMD %u, data_len %u", cmdnum, data_len);

		switch (cmdnum) {

			case kismet_drone_cmd_hello: {
				if (data_len != sizeof(struct kismet_drone_packet_hello)) {
					pomlog(POMLOG_ERR "Invalid length for hello packet : got %u, expected %zu", data_len, sizeof(struct kismet_drone_packet_hello));
					return POM_ERR;
				}
				struct kismet_drone_packet_hello hello_pkt;
				if (pom_read(priv->fd, &hello_pkt, sizeof(struct kismet_drone_packet_hello)) != POM_OK)
					return POM_ERR;

				// Strings from the wire are not guaranteed to be NUL terminated
				char version[33] = { 0 };
				strncpy(version, hello_pkt.kismet_version, 32);
				char hostname[33] = { 0 };
				strncpy(hostname, hello_pkt.host_name, 32);

				pomlog("Input %s connected to Kismet %s on %s (drone version %u)", i->name, version, hostname, ntohl(hello_pkt.drone_version));
				break;
			}

			case kismet_drone_cmd_source: {
				if (data_len != sizeof(struct kismet_drone_packet_source)) {
					pomlog(POMLOG_ERR "Invalid length for source packet : got %u, expected %zu", data_len, sizeof(struct kismet_drone_packet_source));
					return POM_ERR;
				}
				struct kismet_drone_packet_source source_pkt;
				if (pom_read(priv->fd, &source_pkt, sizeof(struct kismet_drone_packet_source)) != POM_OK)
					return POM_ERR;

				if (source_pkt.invalidate) {
					// TODO
					return POM_ERR;
				}

				struct kismet_drone_source *src = malloc(sizeof(struct kismet_drone_source));
				if (!src) {
					pom_oom(sizeof(struct kismet_drone_source));
					return POM_ERR;
				}
				memset(src, 0, sizeof(struct kismet_drone_source));

				memcpy(src->uuid, source_pkt.uuid, sizeof(src->uuid));
				src->name = strndup(source_pkt.name_str, sizeof(source_pkt.name_str));
				src->interface = strndup(source_pkt.interface_str, sizeof(source_pkt.interface_str));
				src->type = strndup(source_pkt.type_str, sizeof(source_pkt.type_str));

				if (!src->name || !src->interface || !src->type) {
					if (src->name)
						free(src->name);
					if (src->interface)
						free(src->interface);
					if (src->type)
						free(src->type);
					free(src);
					pom_oom(sizeof(source_pkt.name_str));
					return POM_ERR;
				}

				pomlog("New Kismet drone source for input %s : %s (interface: %s, type: %s)", i->name, src->name, src->interface, src->type);

				if (source_pkt.channel_hop && !source_pkt.channel_dwell) {
					pomlog(POMLOG_WARN "Warning, source %s from input %s is configured to hop channels without dwelling !", src->name, i->name);
				}

				src->next = priv->srcs;
				if (priv->srcs)
					priv->srcs->prev = src;
				priv->srcs = src;

				break;
			}

			case kismet_drone_cmd_cappacket: {
				if (data_len < sizeof(struct kismet_drone_packet_capture)) {
					pomlog(POMLOG_ERR "Packet capture data length too small");
					return POM_ERR;
				}
				struct kismet_drone_packet_capture capture_pkt;
				if (pom_read(priv->fd, &capture_pkt, sizeof(struct kismet_drone_packet_capture)) != POM_OK)
					return POM_ERR;

				debug_kismet("Capture packet bitmap 0x%X, offset %u", ntohl(capture_pkt.content_bitmap), ntohl(capture_pkt.packet_offset));

				data_len -= sizeof(struct kismet_drone_packet_capture);

				uint32_t bitmap = ntohl(capture_pkt.content_bitmap);
				if (!(bitmap & KISMET_DRONE_BIT_DATA_IEEEPACKET)) {
					debug_kismet("No data in this packet, skipping %u bytes of data", data_len);
					if (input_kismet_drone_discard_bytes(priv, data_len) != POM_OK)
						return POM_ERR;
					break;
				}

				uint32_t offset = ntohl(capture_pkt.packet_offset);
				if (offset > data_len) {
					pomlog(POMLOG_ERR "Packet offset bigger than expected length");
					return POM_ERR;
				}

				if (input_kismet_drone_discard_bytes(priv, offset) != POM_OK)
					return POM_ERR;
				data_len -= offset;

				if (data_len < sizeof(struct kismet_drone_sub_packet_data)) {
					pomlog(POMLOG_ERR "Remaining data smaller than sub_packet_data");
					return POM_ERR;
				}

				struct kismet_drone_sub_packet_data data_pkt;
				if (pom_read(priv->fd, &data_pkt, sizeof(struct kismet_drone_sub_packet_data)) != POM_OK)
					return POM_ERR;

				data_len -= sizeof(struct kismet_drone_sub_packet_data);

				debug_kismet("Capture data packet bitmap 0x%X, hdr len %u, pkt len %u", ntohl(data_pkt.content_bitmap), ntohs(data_pkt.data_hdr_len), ntohs(data_pkt.packet_len));

				size_t pkt_len = ntohs(data_pkt.packet_len);
				if (pkt_len > data_len) {
					pomlog(POMLOG_ERR "Data packet length bigger than expected data size");
					return POM_ERR;
				}

				uint32_t dlt = ntohl(data_pkt.dlt);
				struct proto *datalink = NULL;
				switch (dlt) {
					case DLT_IEEE802_11:
						datalink = priv->datalink_80211;
						break;
					case DLT_IEEE802_11_RADIO:
						datalink = priv->datalink_radiotap;
						break;
					default:
						pomlog(POMLOG_ERR "Unexpected DLT received : %u", dlt);
						return POM_ERR;
				}

				struct packet *pkt = packet_alloc();
				if (!pkt)
					return POM_ERR;

				if (packet_buffer_alloc(pkt, pkt_len, 0) != POM_OK) {
					packet_release(pkt);
					return POM_ERR;
				}

				pkt->input = i;
				pkt->datalink = datalink;
				pkt->ts = (ntohll(data_pkt.tv_sec) * 1000000UL) + ntohll(data_pkt.tv_usec);

				if (pom_read(priv->fd, pkt->buff, pkt_len) != POM_OK) {
					// Don't leak the packet if the read fails
					packet_release(pkt);
					return POM_ERR;
				}

				return core_queue_packet(pkt, 0, 0);
			}

			default: {
				if (input_kismet_drone_discard_bytes(priv, data_len) != POM_OK)
					return POM_ERR;
				break;
			}
		}
	}

	return POM_OK;
}
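/*
 * The command loop above follows a common pattern for framed protocols over a
 * stream socket: read a fixed-size header, validate the advertised payload
 * length against the structure you intend to read, and explicitly discard the
 * payload of any frame you don't understand so the stream stays in sync. A
 * stripped-down sketch of that skeleton; frame_header, read_full() and
 * discard_bytes() are hypothetical stand-ins for the real wire structures and
 * pom_read()/input_kismet_drone_discard_bytes().
 */
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>

struct frame_header {
	uint32_t sentinel;   // Fixed magic value, detects desynchronization
	uint32_t cmd;        // Frame type
	uint32_t data_len;   // Payload length following this header
};

static int read_full(int fd, void *buf, size_t len) {
	size_t done = 0;
	while (done < len) {
		ssize_t r = read(fd, (char *)buf + done, len - done);
		if (r <= 0)
			return -1; // Error or EOF in the middle of a frame
		done += r;
	}
	return 0;
}

static int discard_bytes(int fd, size_t len) {
	char scratch[256];
	while (len) {
		size_t chunk = len > sizeof(scratch) ? sizeof(scratch) : len;
		if (read_full(fd, scratch, chunk) != 0)
			return -1;
		len -= chunk;
	}
	return 0;
}

static int skip_unknown_frame(int fd, const struct frame_header *hdr) {
	// Unknown command: skip the payload so the next header lines up
	return discard_bytes(fd, ntohl(hdr->data_len));
}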
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {

		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL;
			}

			registry_perf_inc(perf_thread_active, 1);
		}

		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;

		// Add it to the unused list
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;
		registry_perf_dec(perf_pkt_queue, 1);
		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {
			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));

		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure we keep it monotonous
			core_clock[tpriv->thread_id] = pkt->ts;

		if (core_process_packet(pkt) == POM_ERR) {
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}

		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));

		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);
	}

	halt("Processing thread encountered an error", 1);
end:
	packet_info_pool_cleanup();

	return NULL;
}
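/*
 * A self-contained sketch (plain POSIX, none of the pom-ng types) of the
 * consumer side of the queue pattern above: the predicate is always
 * re-checked in a loop while holding the mutex, because pthread_cond_wait()
 * can wake spuriously and another consumer may have emptied the queue first.
 */
#include <pthread.h>

struct job { struct job *next; };

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cond = PTHREAD_COND_INITIALIZER;
static struct job *q_head;
static int running = 1;

static struct job *dequeue_job(void) {
	pthread_mutex_lock(&q_lock);
	while (!q_head && running)
		pthread_cond_wait(&q_cond, &q_lock); // Atomically unlocks, waits, relocks
	struct job *j = q_head;
	if (j)
		q_head = j->next;
	pthread_mutex_unlock(&q_lock);
	return j; // NULL means we were asked to stop
}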
int stream_process_packet(struct stream *stream, struct packet *pkt, struct proto_process_stack *stack, unsigned int stack_index, uint32_t seq, uint32_t ack) {

	if (!stream || !pkt || !stack)
		return PROTO_ERR;

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : start", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);

	struct proto_process_stack *cur_stack = &stack[stack_index];
	int direction = cur_stack->direction;

	int must_wait = 0;

	pom_mutex_lock(&stream->wait_lock);

	int res = pthread_mutex_trylock(&stream->lock);
	if (res == EBUSY) {
		// Already locked, let's wait a bit
		must_wait = 1;
	} else if (res) {
		pomlog(POMLOG_ERR "Error while locking packet stream lock : %s", pom_strerror(res));
		abort();
		return POM_ERR;
	} else {
		// We got the processing lock. But was it really this thread's turn ?

		struct stream_thread_wait *tmp = stream->wait_list_head;
		// A thread with a packet preceding ours is waiting
		if (tmp && (tmp->ts < pkt->ts)) {
			// No it wasn't, release the lock and signal the right thread
			must_wait = 2;
			pom_mutex_unlock(&stream->lock);
			debug_stream("thread %p, entry %p : signaling thread %p", pthread_self(), stream, stream->wait_list_head->thread);
			pthread_cond_broadcast(&stream->wait_list_head->cond);
		} else {
			// Yes it was. YAY !
			pom_mutex_unlock(&stream->wait_lock);
		}
	}

	if (must_wait) {

		// Add ourselves to the waiting list
		struct stream_thread_wait *lst = NULL;
		if (stream->wait_list_unused) {
			lst = stream->wait_list_unused;
			stream->wait_list_unused = lst->next;
			lst->next = NULL;
		} else {
			lst = malloc(sizeof(struct stream_thread_wait));
			if (!lst) {
				pom_oom(sizeof(struct stream_thread_wait));
				pom_mutex_unlock(&stream->wait_lock);
				return POM_ERR;
			}
			memset(lst, 0, sizeof(struct stream_thread_wait));

			if (pthread_cond_init(&lst->cond, NULL)) {
				pom_mutex_unlock(&stream->wait_lock);
				pomlog(POMLOG_ERR "Error while initializing wait list condition : %s", pom_strerror(errno));
				free(lst);
				return POM_ERR;
			}
		}

		lst->ts = pkt->ts;
		lst->thread = pthread_self();

		// Keep the wait list ordered by timestamp
		struct stream_thread_wait *tmp;
		for (tmp = stream->wait_list_head; tmp && (tmp->ts < lst->ts); tmp = tmp->next);
		if (tmp) {
			lst->prev = tmp->prev;
			if (lst->prev)
				lst->prev->next = lst;
			else
				stream->wait_list_head = lst;
			lst->next = tmp;
			lst->next->prev = lst;
		} else {
			lst->prev = stream->wait_list_tail;
			if (lst->prev)
				lst->prev->next = lst;
			else
				stream->wait_list_head = lst;
			stream->wait_list_tail = lst;
		}

		while (1) {
			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : waiting (%u)", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack, must_wait);
			if (pthread_cond_wait(&lst->cond, &stream->wait_lock)) {
				pomlog(POMLOG_ERR "Error while waiting for the packet stream wait cond : %s", pom_strerror(errno));
				abort();
				return POM_ERR;
			}

			if (stream->wait_list_head != lst) {
				// There is a small chance that another thread locked stream->wait_lock
				// while pthread_cond_wait() was reacquiring it. If we are not the right
				// thread, simply signal the right one and wait again for our turn.
				debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : wrong thread woke up", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);
				pthread_cond_broadcast(&stream->wait_list_head->cond);
				continue;
			}
			break;
		}

		// Remove ourselves from the wait list and recycle the entry
		tmp = stream->wait_list_head;
		stream->wait_list_head = tmp->next;
		if (stream->wait_list_head)
			stream->wait_list_head->prev = NULL;
		else
			stream->wait_list_tail = NULL;

		tmp->next = stream->wait_list_unused;
		tmp->prev = NULL;
		stream->wait_list_unused = tmp;

		pom_mutex_unlock(&stream->wait_lock);
		pom_mutex_lock(&stream->lock);
	}

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : start locked : cur_seq %u, rev_seq %u", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack, stream->cur_seq[direction], stream->cur_seq[POM_DIR_REVERSE(direction)]);

	// Update the stream flags
	if (stream->flags & STREAM_FLAG_BIDIR) {
		if (direction == POM_DIR_FWD && !(stream->flags & STREAM_FLAG_GOT_FWD_DIR)) {
			stream->flags |= STREAM_FLAG_GOT_FWD_DIR;
		} else if (direction == POM_DIR_REV && !(stream->flags & STREAM_FLAG_GOT_REV_DIR)) {
			stream->flags |= STREAM_FLAG_GOT_REV_DIR;
		}
	}

	// Update the last timestamp seen on the stream
	if (stream->last_ts < pkt->ts)
		stream->last_ts = pkt->ts;

	// Put this packet in our struct stream_pkt
	struct stream_pkt spkt = {0};
	spkt.pkt = pkt;
	spkt.seq = seq;
	spkt.ack = ack;
	spkt.plen = cur_stack->plen;
	spkt.stack = stack;
	spkt.stack_index = stack_index;

	// Check that we know the start sequence; if not, queue the packet
	int dir_flag = (direction == POM_DIR_FWD ? STREAM_FLAG_GOT_FWD_STARTSEQ : STREAM_FLAG_GOT_REV_STARTSEQ);

	if ( ((stream->flags & STREAM_FLAG_BIDIR) && ((stream->flags & STREAM_FLAG_GOT_BOTH_STARTSEQ) == STREAM_FLAG_GOT_BOTH_STARTSEQ))
		|| (!(stream->flags & STREAM_FLAG_BIDIR) && (stream->flags & dir_flag)) ) {

		// Check if the packet is worth processing
		uint32_t cur_seq = stream->cur_seq[direction];
		if (cur_seq != seq) {
			if (stream_is_packet_old_dupe(stream, &spkt, direction)) {
				// cur_seq is after the end of the packet, discard it
				stream_end_process_packet(stream);
				debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : discard", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);
				return PROTO_OK;
			}

			if (stream_remove_dupe_bytes(stream, &spkt, direction) == POM_ERR) {
				stream_end_process_packet(stream);
				return PROTO_ERR;
			}
		}

		// Ok let's process it then

		// Check if it is the packet we're waiting for
		if (stream_is_packet_next(stream, &spkt, direction)) {

			// Process it
			stream->cur_seq[direction] += cur_stack->plen;
			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : process", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);

			int res = stream->handler(stream->ce, pkt, stack, stack_index);
			if (res == PROTO_ERR) {
				stream_end_process_packet(stream);
				return PROTO_ERR;
			}

			// Flag the stream as running
			stream->flags |= STREAM_FLAG_RUNNING;

			// Check if additional queued packets can now be processed
			struct stream_pkt *p = NULL;
			unsigned int cur_dir = direction;
			while ((p = stream_get_next(stream, &cur_dir))) {

				debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : process additional", pthread_self(), stream, pom_ptime_sec(p->pkt->ts), pom_ptime_usec(p->pkt->ts), p->seq, p->ack);

				if (stream->handler(stream->ce, p->pkt, p->stack, p->stack_index) == POM_ERR) {
					stream_end_process_packet(stream);
					return PROTO_ERR;
				}

				stream->cur_seq[cur_dir] += p->plen;

				stream_free_packet(p);
			}

			stream_end_process_packet(stream);

			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : done processed", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);
			return res;
		}
	} else {
		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : start_seq not known yet", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);
	}

	// Queue the packet then
	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : queue", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);

	struct stream_pkt *p = malloc(sizeof(struct stream_pkt));
	if (!p) {
		pom_oom(sizeof(struct stream_pkt));
		stream_end_process_packet(stream);
		return PROTO_ERR;
	}
	memset(p, 0, sizeof(struct stream_pkt));

	int flags = 0;
	if (stream->flags & STREAM_FLAG_PACKET_NO_COPY)
		flags = PACKET_FLAG_FORCE_NO_COPY;

	p->pkt = packet_clone(pkt, flags);
	if (!p->pkt) {
		stream_end_process_packet(stream);
		free(p);
		return PROTO_ERR;
	}

	p->stack = core_stack_backup(stack, pkt, p->pkt);
	if (!p->stack) {
		stream_end_process_packet(stream);
		packet_release(p->pkt);
		free(p);
		return PROTO_ERR;
	}

	p->plen = cur_stack->plen;
	p->seq = seq;
	p->ack = ack;
	p->stack_index = stack_index;

	if (!stream->tail[direction]) {
		stream->head[direction] = p;
		stream->tail[direction] = p;
	} else {
		// Walk backwards from the tail to find the packet we sort after,
		// comparing sequence numbers modulo 2^32
		struct stream_pkt *tmp = stream->tail[direction];
		while (tmp && ((tmp->seq >= seq && tmp->seq - seq < STREAM_HALF_SEQ)
			|| (tmp->seq <= seq && seq - tmp->seq > STREAM_HALF_SEQ))) {
			tmp = tmp->prev;
		}

		if (!tmp) {
			// Packet goes at the beginning of the list
			p->next = stream->head[direction];
			if (p->next)
				p->next->prev = p;
			else
				stream->tail[direction] = p;
			stream->head[direction] = p;
		} else {
			// Insert the packet after the current one
			p->next = tmp->next;
			p->prev = tmp;
			if (p->next)
				p->next->prev = p;
			else
				stream->tail[direction] = p;
			tmp->next = p;
		}
	}

	stream->cur_buff_size += cur_stack->plen;

	if (stream->cur_buff_size >= stream->max_buff_size) {
		// Buffer overflow
		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : buffer overflow, forced dequeue", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);
		if (stream_force_dequeue(stream) != POM_OK) {
			stream_end_process_packet(stream);
			return POM_ERR;
		}
	}

	stream_end_process_packet(stream);

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : done queued", pthread_self(), stream, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts), seq, ack);

	return PROTO_OK;
}
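/*
 * The insertion loop above orders packets by sequence number modulo 2^32:
 * a is considered "before" b when the forward distance from a to b is less
 * than half the sequence space. A standalone sketch of that comparison,
 * assuming STREAM_HALF_SEQ is 0x80000000 (half of the 32-bit space):
 */
#include <stdint.h>

#define HALF_SEQ 0x80000000u

static inline int seq_before(uint32_t a, uint32_t b) {
	// Relies on unsigned wrap-around: (b - a) is the forward distance from a to b
	return (b - a) != 0 && (b - a) < HALF_SEQ;
}

/* Example: seq_before(0xFFFFFF00u, 0x00000010u) is true, because 0x10 comes
 * 0x110 bytes after 0xFFFFFF00 once the sequence space wraps. */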
static void stream_free_packet(struct stream_pkt *p) {
	core_stack_release(p->stack);
	packet_release(p->pkt);
	free(p);
}
static int input_pcap_read(struct input *i) {

	struct input_pcap_priv *p = i->priv;

	if (p->type == input_pcap_type_dir && !p->tpriv.dir.files) {
		if (input_pcap_dir_open(i) != POM_OK) {
			// Don't error out if the scan was interrupted
			if (p->tpriv.dir.interrupt_scan)
				return POM_OK;
			return POM_ERR;
		}
	}

	struct pcap_pkthdr *phdr;
	const u_char *data;

	int result = pcap_next_ex(p->p, &phdr, &data);

	if (result < 0) {
		// End of file or error
		if (p->type == input_pcap_type_dir) {
			if (result != -2) // -2 means EOF
				pomlog(POMLOG_WARN "Error while reading packet from file %s : %s. Moving on to the next file ...", p->tpriv.dir.cur_file->filename, pcap_geterr(p->p));
			pcap_close(p->p);
			p->p = NULL;
			p->warning = 0;
			if (input_pcap_dir_open_next(p) != POM_OK)
				return POM_ERR;
			if (!p->tpriv.dir.cur_file) {
				// No more files
				return input_stop(i);
			}
			result = pcap_next_ex(p->p, &phdr, &data);
			if (result < 0) {
				pomlog(POMLOG_ERR "Error while reading first packet of new file");
				return POM_ERR;
			}
		} else {
			if (result == -2) // EOF
				return input_stop(i);
			pomlog(POMLOG_ERR "Error while reading file : %s", pcap_geterr(p->p));
			return POM_ERR;
		}
	}

	if (result == 0) // Timeout
		return POM_OK;

	// Only inspect the header once we know pcap_next_ex() returned a packet
	if (phdr->len > phdr->caplen && !p->warning) {
		pomlog(POMLOG_WARN "Warning, some packets were truncated at capture time on input %s", i->name);
		p->warning = 1;
	}

	struct packet *pkt = packet_alloc();
	if (!pkt)
		return POM_ERR;

	if (packet_buffer_alloc(pkt, phdr->caplen - p->skip_offset, p->align_offset) != POM_OK) {
		packet_release(pkt);
		return POM_ERR;
	}

	pkt->input = i;
	pkt->datalink = p->datalink_proto;
	pkt->ts = pom_timeval_to_ptime(phdr->ts);

	memcpy(pkt->buff, data + p->skip_offset, phdr->caplen - p->skip_offset);

	unsigned int flags = 0, affinity = 0;
	if (p->type == input_pcap_type_interface)
		flags = CORE_QUEUE_DROP_IF_FULL;

#ifdef DLT_MPEG_2_TS
	if (p->datalink_type == DLT_MPEG_2_TS) {
		// MPEG2 TS has thread affinity based on the PID.
		// Use unsigned chars so the second byte doesn't get sign-extended.
		unsigned char *buff = pkt->buff;
		flags |= CORE_QUEUE_HAS_THREAD_AFFINITY;
		affinity = ((buff[1] & 0x1F) << 8) | buff[2];
	}
#endif

	return core_queue_packet(pkt, flags, affinity);
}
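/*
 * The affinity computation above is the standard MPEG-2 TS PID extraction:
 * the 13-bit PID sits in the low 5 bits of the second header byte plus all of
 * the third. A standalone sketch:
 */
#include <stdint.h>

static inline uint16_t mpegts_pid(const uint8_t *ts_hdr) {
	// ts_hdr[0] is the 0x47 sync byte; the PID spans ts_hdr[1..2]
	return ((uint16_t)(ts_hdr[1] & 0x1F) << 8) | ts_hdr[2];
}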
int core_queue_packet(struct packet *p, unsigned int flags, unsigned int thread_affinity) {

	// Update the counters
	registry_perf_inc(p->input->perf_pkts_in, 1);
	registry_perf_inc(p->input->perf_bytes_in, p->len);

	if (!core_run)
		return POM_ERR;

	debug_core("Queuing packet %p (%u.%06u)", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));

	// Find the right thread to queue to
	struct core_processing_thread *t = NULL;
	if (flags & CORE_QUEUE_HAS_THREAD_AFFINITY) {
		t = core_processing_threads[thread_affinity % core_num_threads];
		pom_mutex_lock(&t->pkt_queue_lock);
	} else {
		static volatile unsigned int start = 0;
		unsigned int i;
		while (1) {
			unsigned int thread_id = start;
			for (i = 0; i < core_num_threads; i++) {
				thread_id++;
				if (thread_id >= core_num_threads)
					thread_id -= core_num_threads;
				t = core_processing_threads[thread_id];
				int res = pthread_mutex_trylock(&t->pkt_queue_lock);
				if (res == EBUSY) {
					// Thread is busy, go to the next one
					continue;
				} else if (res) {
					pomlog(POMLOG_ERR "Error while locking a processing thread pkt_queue mutex : %s", pom_strerror(res));
					abort();
					return POM_ERR;
				}

				// We've got the lock, check if it's ok to queue here
				if (t->pkt_count < CORE_THREAD_PKT_QUEUE_MAX) {
					// Use this thread
					break;
				}

				// Too many packets pending in this thread, go to the next one
				pom_mutex_unlock(&t->pkt_queue_lock);
			}

			if (i < core_num_threads) {
				// We locked on a thread
				start = thread_id;
				break;
			}

			// No thread found
			if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
				// Queue full
				if (flags & CORE_QUEUE_DROP_IF_FULL) {
					packet_release(p);
					registry_perf_inc(perf_pkt_dropped, 1);
					debug_core("Dropped packet %p (%u.%06u)", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));
					return POM_OK;
				}

				// We're not going to drop this. Wait then
				debug_core("All queues full. Waiting ...");
				pom_mutex_lock(&core_pkt_queue_wait_lock);

				// Recheck the count after locking
				if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
					int res = pthread_cond_wait(&core_pkt_queue_wait_cond, &core_pkt_queue_wait_lock);
					if (res) {
						pomlog(POMLOG_ERR "Error while waiting for the core pkt_queue condition : %s", pom_strerror(res));
						abort();
					}
				}
				pom_mutex_unlock(&core_pkt_queue_wait_lock);
			}
		}
	}

	// We've got the thread's lock, add the packet to its queue
	struct core_packet_queue *tmp = NULL;
	if (t->pkt_queue_unused) {
		tmp = t->pkt_queue_unused;
		t->pkt_queue_unused = tmp->next;
	} else {
		tmp = malloc(sizeof(struct core_packet_queue));
		if (!tmp) {
			pom_mutex_unlock(&t->pkt_queue_lock);
			pom_oom(sizeof(struct core_packet_queue));
			return POM_ERR;
		}
	}

	tmp->pkt = p;
	tmp->next = NULL;
	if (t->pkt_queue_tail) {
		t->pkt_queue_tail->next = tmp;
	} else {
		t->pkt_queue_head = tmp;
		// The queue was empty, we need to signal it
		int res = pthread_cond_signal(&t->pkt_queue_cond);
		if (res) {
			pomlog(POMLOG_ERR "Error while signaling the thread pkt_queue restart condition : %s", pom_strerror(res));
			abort();
			return POM_ERR;
		}
	}
	t->pkt_queue_tail = tmp;

	t->pkt_count++;
	__sync_fetch_and_add(&core_pkt_queue_count, 1);
	registry_perf_inc(perf_pkt_queue, 1);

	debug_core("Queued packet %p (%u.%06u) to thread %u", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts), t->thread_id);

	pom_mutex_unlock(&t->pkt_queue_lock);

	return POM_OK;
}
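/*
 * A stripped-down sketch (plain pthreads, hypothetical worker type) of the
 * dispatch strategy above: try each worker's lock in round-robin order with
 * pthread_mutex_trylock() instead of blocking on one, so a busy or full
 * worker is simply skipped and the next scan resumes where this one
 * succeeded.
 */
#include <pthread.h>

#define NUM_WORKERS 4
#define QUEUE_MAX 256

struct worker {
	pthread_mutex_t lock;
	unsigned int pending;
};

static struct worker workers[NUM_WORKERS];

/* Returns a locked worker with room in its queue, or NULL if all were
 * busy or full; the caller must unlock the returned worker. */
static struct worker *pick_worker(void) {
	static unsigned int start = 0;
	unsigned int id = start;
	unsigned int i;
	for (i = 0; i < NUM_WORKERS; i++) {
		id = (id + 1) % NUM_WORKERS;
		if (pthread_mutex_trylock(&workers[id].lock))
			continue; // EBUSY (or any error): don't wait, try the next worker
		if (workers[id].pending < QUEUE_MAX) {
			start = id; // Resume the scan here next time
			return &workers[id];
		}
		pthread_mutex_unlock(&workers[id].lock); // Full: keep looking
	}
	return NULL;
}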