int proto_expectation_remove(struct proto_expectation *e) { struct proto *proto = e->tail->proto; pom_rwlock_wlock(&proto->expectation_lock); if (!(e->flags & PROTO_EXPECTATION_FLAG_QUEUED)) { pom_rwlock_unlock(&proto->expectation_lock); return POM_ERR; } if (!e->next && !e->prev && proto->expectations != e) { // The expectation is not queued pom_rwlock_unlock(&proto->expectation_lock); return POM_OK; } if (e->next) e->next->prev = e->prev; if (e->prev) e->prev->next = e->next; else proto->expectations = e->next; __sync_fetch_and_and(&e->flags, ~PROTO_EXPECTATION_FLAG_QUEUED); pom_rwlock_unlock(&proto->expectation_lock); registry_perf_dec(e->proto->perf_expt_pending, 1); return POM_OK; }
// Timer callback fired when a queued expectation expires without having
// been matched. Unlinks the expectation from its proto's list, releases
// its conntrack priv (if any) and frees the expectation itself.
// NOTE(review): the list is unlinked without checking the QUEUED flag —
// presumably the timer can only be pending while the expectation is
// queued; verify against the add paths.
int proto_expectation_expiry(void *priv, ptime now) {

	struct proto_expectation *e = priv;
	struct proto *proto = e->tail->proto;

	// Release the timer first so it cannot fire again
	timer_cleanup(e->expiry);

	// Unlink from the proto's expectation list
	pom_rwlock_wlock(&proto->expectation_lock);

	if (e->next)
		e->next->prev = e->prev;

	if (e->prev)
		e->prev->next = e->next;
	else
		proto->expectations = e->next;

	pom_rwlock_unlock(&proto->expectation_lock);

	// Let the protocol free the conntrack priv attached to the expectation
	if (e->priv && proto->info->ct_info->cleanup_handler) {
		if (proto->info->ct_info->cleanup_handler(e->priv) != POM_OK)
			pomlog(POMLOG_WARN "Unable to free the conntrack priv of the proto_expectation");
	}

	registry_perf_dec(e->proto->perf_expt_pending, 1);

	proto_expectation_cleanup(e);

	return POM_OK;
}
// Queue the expectation <e> on its tail proto with an expiration timer of
// <expiry> (relative to <now>). Takes a reference on <session>.
// Returns POM_OK on success, POM_ERR on failure (nothing queued, no
// session reference taken).
int proto_expectation_add(struct proto_expectation *e, struct conntrack_session *session, unsigned int expiry, ptime now) {

	if (!e || !e->tail || !e->tail->proto) {
		pomlog(POMLOG_ERR "Cannot add expectation as it's incomplete");
		return POM_ERR;
	}

	e->expiry = timer_alloc(e, proto_expectation_expiry);
	if (!e->expiry)
		return POM_ERR;

	if (timer_queue_now(e->expiry, expiry, now) != POM_OK) {
		// Fix : don't leak the freshly allocated timer when it cannot be queued
		timer_cleanup(e->expiry);
		e->expiry = NULL;
		return POM_ERR;
	}

	conntrack_session_refcount_inc(session);
	e->session = session;

	struct proto *proto = e->tail->proto;

	pom_rwlock_wlock(&proto->expectation_lock);

	// Add at the head of the proto's expectation list
	e->next = proto->expectations;
	if (e->next)
		e->next->prev = e;
	proto->expectations = e;

	pom_rwlock_unlock(&proto->expectation_lock);

	registry_perf_inc(e->proto->perf_expt_pending, 1);

	return POM_OK;
}
int proto_packet_listener_unregister(struct proto_packet_listener *l) { if (!l) return POM_ERR; pom_rwlock_wlock(&l->proto->listeners_lock); if (l->next) l->next->prev = l->prev; if (l->prev) { l->prev->next = l->next; } else { if (l->flags & PROTO_PACKET_LISTENER_PLOAD_ONLY) l->proto->payload_listeners = l->next; else l->proto->packet_listeners = l->next; } pom_rwlock_unlock(&l->proto->listeners_lock); free(l); return POM_OK; }
struct proto_packet_listener *proto_packet_listener_register(struct proto *proto, unsigned int flags, void *object, int (*process) (void *object, struct packet *p, struct proto_process_stack *s, unsigned int stack_index), struct filter_node *f) { struct proto_packet_listener *l = malloc(sizeof(struct proto_packet_listener)); if (!l) { pom_oom(sizeof(struct proto_packet_listener)); return NULL; } memset(l, 0, sizeof(struct proto_packet_listener)); l->flags = flags; l->process = process; l->proto = proto; l->object = object; l->filter = f; pom_rwlock_wlock(&proto->listeners_lock); if (l->flags & PROTO_PACKET_LISTENER_PLOAD_ONLY) l->next = proto->payload_listeners; else l->next = proto->packet_listeners; if (l->next) l->next->prev = l; if (l->flags & PROTO_PACKET_LISTENER_PLOAD_ONLY) proto->payload_listeners = l; else proto->packet_listeners = l; pom_rwlock_unlock(&l->proto->listeners_lock); return l; }
int proto_post_process(struct packet *p, struct proto_process_stack *s, unsigned int stack_index) { if (!s) return PROTO_ERR; struct proto *proto = s[stack_index].proto; if (!proto) return PROTO_ERR; // Process the listeners after the whole stack has been processed struct proto_packet_listener *l; pom_rwlock_rlock(&proto->listeners_lock); for (l = proto->packet_listeners; l; l = l->next) { if (l->filter && !filter_packet_match(l->filter, s)) continue; if (l->process(l->object, p, s, stack_index) != POM_OK) { pomlog(POMLOG_WARN "Warning packet listener failed"); // FIXME remove listener from the list ? } } pom_rwlock_unlock(&proto->listeners_lock); if (proto->info->post_process) return proto->info->post_process(proto->priv, p, s, stack_index); return POM_OK; }
int proto_expectation_add(struct proto_expectation *e) { if (!e || !e->tail || !e->tail->proto) { pomlog(POMLOG_ERR "Cannot add expectation as it's incomplete"); return POM_ERR; } if (e->flags & PROTO_EXPECTATION_FLAG_QUEUED) return POM_ERR; struct proto *proto = e->tail->proto; pom_rwlock_wlock(&proto->expectation_lock); __sync_fetch_and_or(&e->flags, PROTO_EXPECTATION_FLAG_QUEUED); e->next = proto->expectations; if (e->next) e->next->prev = e; proto->expectations = e; pom_rwlock_unlock(&proto->expectation_lock); registry_perf_inc(e->proto->perf_expt_pending, 1); return POM_OK; }
void core_assert_is_paused() { if (!core_run) // Core is not yet running return; int res = pthread_rwlock_trywrlock(&core_processing_lock); if (res == EDEADLK || res == EBUSY) return; pom_rwlock_unlock(&core_processing_lock); pomlog(POMLOG_ERR "Error, core processing should be locked while it's not !"); abort(); }
int proto_process_pload_listeners(struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) { // Process payload listeners of the previous proto struct proto_process_stack *s = &stack[stack_index]; struct proto_process_stack *s_next = &stack[stack_index + 1]; struct proto *proto = s->proto; if (proto && s_next->plen) { struct proto_packet_listener *l; pom_rwlock_rlock(&proto->listeners_lock); for (l = proto->payload_listeners; l; l = l->next) { if (l->filter && !filter_packet_match(l->filter, stack)) continue; if (l->process(l->object, p, stack, stack_index + 1) != POM_OK) { pomlog(POMLOG_WARN "Warning payload listener failed"); // FIXME remove listener from the list ? } } pom_rwlock_unlock(&proto->listeners_lock); } return POM_OK; }
void proto_packet_listener_set_filter(struct proto_packet_listener *l, struct filter_node *f) { pom_rwlock_wlock(&l->proto->listeners_lock); l->filter = f; pom_rwlock_unlock(&l->proto->listeners_lock); }
int proto_process(struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) { struct proto_process_stack *s = &stack[stack_index]; struct proto *proto = s->proto; if (!proto || !proto->info->process) return PROTO_ERR; int res = proto->info->process(proto->priv, p, stack, stack_index); registry_perf_inc(proto->perf_pkts, 1); registry_perf_inc(proto->perf_bytes, s->plen); if (res != PROTO_OK) return res; // Process the expectations ! pom_rwlock_rlock(&proto->expectation_lock); struct proto_expectation *e = proto->expectations; while (e) { int expt_dir = POM_DIR_UNK; struct proto_expectation_stack *es = e->tail; struct ptype *fwd_value = s->pkt_info->fields_value[s->proto->info->ct_info->fwd_pkt_field_id]; struct ptype *rev_value = s->pkt_info->fields_value[s->proto->info->ct_info->rev_pkt_field_id]; if ((!es->fields[POM_DIR_FWD] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value)) && (!es->fields[POM_DIR_REV] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) { // Expectation matched the forward direction expt_dir = POM_DIR_FWD; } else if ((!es->fields[POM_DIR_FWD] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value)) && (!es->fields[POM_DIR_REV] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) { // Expectation matched the reverse direction expt_dir = POM_DIR_REV; } if (expt_dir == POM_DIR_UNK) { // Expectation not matched e = e->next; continue; } es = es->prev; int stack_index_tmp = stack_index - 1; while (es) { struct proto_process_stack *s_tmp = &stack[stack_index_tmp]; if (s_tmp->proto != es->proto) { e = e->next; continue; } fwd_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->fwd_pkt_field_id]; rev_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->rev_pkt_field_id]; if (expt_dir == POM_DIR_FWD) { if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value)) || (es->fields[POM_DIR_REV] && 
!ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) { e = e->next; continue; } } else { if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value)) || (es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) { e = e->next; continue; } } es = es->prev; stack_index_tmp--; } // Expectation matched ! // Relock with write access pom_rwlock_unlock(&proto->expectation_lock); pom_rwlock_wlock(&proto->expectation_lock); debug_expectation("Expectation %p matched !", e); // Remove it from the list if (e->next) e->next->prev = e->prev; if (e->prev) e->prev->next = e->next; else proto->expectations = e->next; struct proto_process_stack *s_next = &stack[stack_index + 1]; s_next->proto = e->proto; if (conntrack_get_unique_from_parent(stack, stack_index + 1) != POM_OK) { proto_expectation_cleanup(e); return PROTO_ERR; } s_next->ce->priv = e->priv; if (conntrack_session_bind(s_next->ce, e->session)) { proto_expectation_cleanup(e); return PROTO_ERR; } registry_perf_dec(e->proto->perf_expt_pending, 1); registry_perf_inc(e->proto->perf_expt_matched, 1); proto_expectation_cleanup(e); conntrack_unlock(s_next->ce); break; } pom_rwlock_unlock(&proto->expectation_lock); return res; }
// Main loop of a core processing thread : dequeues packets from this
// thread's queue, processes them through the protocol stack and runs the
// timers. Exits when core_run is cleared or on a processing error.
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {

		// Wait until a packet shows up in our queue
		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			// Last active thread going idle : if the core was finishing,
			// it is now idle
			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL;
			}

			registry_perf_inc(perf_thread_active, 1);
		}

		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;

		// Add it to the unused list
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;
		registry_perf_dec(perf_pkt_queue, 1);

		// Global queue count is shared across threads, update it atomically
		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {
			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure we keep it monotonous
			core_clock[tpriv->thread_id] = pkt->ts;

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			// Fatal processing error : stop the whole core
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}

		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));

		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);
	}

	// Only error paths (break) and a cleared core_run with a non-empty
	// queue reach this halt; the normal shutdown path goes through 'end'
	halt("Processing thread encountered an error", 1);

end:
	packet_info_pool_cleanup();

	return NULL;
}
// Run the protocol handler for stack[stack_index], then match queued
// expectations against this packet in two passes : a first pass under the
// read lock marks matching expectations with an atomic MATCHED flag, then
// (only if something matched) a second pass under the write lock dequeues
// them, creates the conntrack for the next stack level and transfers the
// expectation's priv/session/callback to it.
int proto_process(struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto *proto = s->proto;

	if (!proto || !proto->info->process)
		return PROTO_ERR;

	int res = proto->info->process(proto->priv, p, stack, stack_index);

	registry_perf_inc(proto->perf_pkts, 1);
	registry_perf_inc(proto->perf_bytes, s->plen);

	if (res != PROTO_OK)
		return res;

	int matched = 0;

	// Process the expectations !
	// First pass : read-only scan, flag the expectations that match
	pom_rwlock_rlock(&proto->expectation_lock);

	struct proto_expectation *e = NULL;
	for (e = proto->expectations; e; e = e->next) {

		if (e->flags & PROTO_EXPECTATION_FLAG_MATCHED) {
			// Another thread already matched the expectation, continue
			continue;
		}

		// Bit one means it matches the forward direction
		// Bit two means it matches the reverse direction
		int expt_dir = 3;

		// Walk the expectation levels from the tail down the parsed stack,
		// eliminating directions that don't match at each level
		struct proto_expectation_stack *es = e->tail;
		int stack_index_tmp = stack_index;

		while (es) {
			struct proto_process_stack *s_tmp = &stack[stack_index_tmp];

			if (s_tmp->proto != es->proto) {
				// Protocol mismatch at this level : no match at all
				expt_dir = 0;
				break;
			}

			struct ptype *fwd_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->fwd_pkt_field_id];
			struct ptype *rev_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->rev_pkt_field_id];

			if (expt_dir & 1) {
				// NULL fields act as wildcards
				if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value))
					|| (es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) {
					expt_dir &= ~1; // It doesn't match in the forward direction
				}
			}

			if (expt_dir & 2) {
				if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value))
					|| (es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) {
					expt_dir &= ~2;
				}
			}

			if (!expt_dir)
				break;

			es = es->prev;
			stack_index_tmp--;
		}

		if (expt_dir) {
			// It matched : set the flag atomically, only count it if we
			// were the ones who set it (another thread may race us)
			if (!(__sync_fetch_and_or(&e->flags, PROTO_EXPECTATION_FLAG_MATCHED) & PROTO_EXPECTATION_FLAG_MATCHED)) {
				// Something matched
				matched++;
			}
		}
	}

	pom_rwlock_unlock(&proto->expectation_lock);

	if (!matched)
		return POM_OK;

	// At least one expectation matched !
	debug_expectation("%u expectation matched !", matched);

	// Relock with write access
	// Second pass : dequeue every flagged expectation and act on it
	pom_rwlock_wlock(&proto->expectation_lock);

	e = proto->expectations;
	while (e) {

		// Save the next pointer first since <cur> may be unlinked/freed
		struct proto_expectation *cur = e;
		e = e->next;

		if (!(cur->flags & PROTO_EXPECTATION_FLAG_MATCHED))
			continue;

		// Remove the expectation from the conntrack
		if (cur->next)
			cur->next->prev = cur->prev;

		if (cur->prev)
			cur->prev->next = cur->next;
		else
			proto->expectations = cur->next;

		// Remove matched and queued flags
		__sync_fetch_and_and(&cur->flags, ~(PROTO_EXPECTATION_FLAG_MATCHED | PROTO_EXPECTATION_FLAG_QUEUED));

		struct proto_process_stack *s_next = &stack[stack_index + 1];
		s_next->proto = cur->proto;

		// Create (or fetch) the conntrack for the expected child protocol
		if (conntrack_get_unique_from_parent(stack, stack_index + 1) != POM_OK) {
			proto_expectation_cleanup(cur);
			continue;
		}

		if (!s_next->ce->priv) {
			// Hand the expectation's priv over to the new conntrack
			s_next->ce->priv = cur->priv;
			// Prevent cleanup of private data while cleaning the expectation
			cur->priv = NULL;
		}

		if (cur->session) {
			if (conntrack_session_bind(s_next->ce, cur->session)) {
				proto_expectation_cleanup(cur);
				continue;
			}
		}

		registry_perf_dec(cur->proto->perf_expt_pending, 1);
		registry_perf_inc(cur->proto->perf_expt_matched, 1);

		if (cur->match_callback) {
			// Call the callback with the conntrack locked
			cur->match_callback(cur, cur->callback_priv, s_next->ce);
			// Nullify callback_priv so it doesn't get cleaned up
			cur->callback_priv = NULL;
		}

		if (cur->expiry) {
			// The expectation was added using 'add_and_cleanup' function
			proto_expectation_cleanup(cur);
		}

		conntrack_unlock(s_next->ce);
	}

	pom_rwlock_unlock(&proto->expectation_lock);

	return res;
}