Example #1
int output_tap_pkt_process(void *obj, struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct output_tap_priv *priv = obj;

	struct proto_process_stack *s = &stack[stack_index];

	ssize_t wres = 0;
	size_t size = s->plen, pos = 0;
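	// Write the whole payload, looping to handle partial writes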
	while (size > 0) {
		wres = write(priv->fd, s->pload + pos, size);
		if (wres == -1) {
			pomlog(POMLOG_ERR "Error while writing to the tap interface %s : %s", PTYPE_STRING_GETVAL(priv->p_ifname), pom_strerror(errno));
			return POM_ERR;
		}
		pos += wres;
		size -= wres;
	}

	if (priv->perf_pkts_out)
		registry_perf_inc(priv->perf_pkts_out, 1);
	if (priv->perf_bytes_out)
		registry_perf_inc(priv->perf_bytes_out, s->plen);

	return POM_OK;
}
Example #2
static int proto_ipv4_fragment_cleanup(struct conntrack_entry *ce, void *priv, ptime now) {

	struct proto_ipv4_fragment *f = priv;

	// Remove the frag from the conntrack
	if (f->prev)
		f->prev->next = f->next;
	else
		ce->priv = f->next;

	if (f->next)
		f->next->prev = f->prev;

	conntrack_unlock(ce);


	if (!(f->flags & PROTO_IPV4_FLAG_PROCESSED)) {
		registry_perf_inc(perf_frags_dropped, f->count);
	}

	if (f->multipart)
		packet_multipart_cleanup(f->multipart);
	
	if (f->t)
		conntrack_timer_cleanup(f->t);
	
	free(f);

	return POM_OK;

}
Example #3
File: event.c Project: k0a1a/pom-ng
int event_process_begin(struct event *evt, struct proto_process_stack *stack, int stack_index, ptime ts) {

	debug_event("Processing event begin %s", evt->reg->info->name);

	if (evt->flags & EVENT_FLAG_PROCESS_BEGAN) {
		pomlog(POMLOG_ERR "Internal error: event %s already processed", evt->reg->info->name);
		return POM_ERR;
	}

	event_refcount_inc(evt);

	if (stack)
		evt->ce = stack[stack_index].ce;

	evt->ts = ts;

	struct event_listener *lst;
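	// Notify every registered listener that processing of this event has begun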
	for (lst = evt->reg->listeners; lst; lst = lst->next) {
		if (lst->process_begin && lst->process_begin(evt, lst->obj, stack, stack_index) != POM_OK) {
			pomlog(POMLOG_WARN "An error occurred while processing the beginning of event %s", evt->reg->info->name);
		}
	}

	evt->flags |= EVENT_FLAG_PROCESS_BEGAN;

	registry_perf_inc(evt->reg->perf_ongoing, 1);

	return POM_OK;
}
Example #4
int packet_buffer_alloc(struct packet *pkt, size_t size, size_t align_offset) {

    if (align_offset >= PACKET_BUFFER_ALIGNMENT) {
        pomlog(POMLOG_ERR "Alignment offset too big");
        return POM_ERR;
    }

    size_t tot_size = size + align_offset + PACKET_BUFFER_ALIGNMENT + sizeof(struct packet_buffer);

    struct packet_buffer *pb = malloc(tot_size);
    if (!pb) {
        pom_oom(tot_size);
        return POM_ERR;
    }
    memset(pb, 0, tot_size);

    pb->base_buff = (void*)pb + sizeof(struct packet_buffer);
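    // Round the buffer start up to the next PACKET_BUFFER_ALIGNMENT boundary, then apply the requested offset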
    pb->aligned_buff = (void*) (((long)pb->base_buff & ~(PACKET_BUFFER_ALIGNMENT - 1)) + PACKET_BUFFER_ALIGNMENT + align_offset);
    pb->buff_size = tot_size;

    pkt->pkt_buff = pb;
    pkt->len = size;
    pkt->buff = pb->aligned_buff;


    registry_perf_inc(perf_pkt_buff, tot_size);

    return POM_OK;
}
Example #5
int proto_expectation_add(struct proto_expectation *e, struct conntrack_session *session, unsigned int expiry, ptime now) {

    if (!e || !e->tail || !e->tail->proto) {
        pomlog(POMLOG_ERR "Cannot add expectation as it's incomplete");
        return POM_ERR;
    }

    e->expiry = timer_alloc(e, proto_expectation_expiry);
    if (!e->expiry)
        return POM_ERR;

    if (timer_queue_now(e->expiry, expiry, now) != POM_OK)
        return POM_ERR;

    conntrack_session_refcount_inc(session);
    e->session = session;

    struct proto *proto = e->tail->proto;
    pom_rwlock_wlock(&proto->expectation_lock);

    e->next = proto->expectations;
    if (e->next)
        e->next->prev = e;

    proto->expectations = e;

    pom_rwlock_unlock(&proto->expectation_lock);

    registry_perf_inc(e->proto->perf_expt_pending, 1);

    return POM_OK;
}
Example #6
static int file_pload_open(struct output_file_priv *output_priv, const char *filename, void **pload_priv) {

	// Create the private structure for the payload
	struct output_file_pload_priv *ppriv = malloc(sizeof(struct output_file_pload_priv));
	if (!ppriv) {
		pom_oom(sizeof(struct output_file_pload_priv));
		return POM_ERR;
	}
	memset(ppriv, 0, sizeof(struct output_file_pload_priv));

	ppriv->filename = strdup(filename);
	if (!ppriv->filename) {
		free(ppriv);
		pom_oom(strlen(filename) + 1);
		return POM_ERR;
	}

	ppriv->fd = pom_open(filename, O_WRONLY | O_CREAT, 0666);
	if (ppriv->fd == -1) {
		free(ppriv->filename);
		free(ppriv);
		return POM_ERR;
	}

	if (output_priv && output_priv->perf_files_open)
		registry_perf_inc(output_priv->perf_files_open, 1);
		

	pomlog(POMLOG_DEBUG "File %s open", ppriv->filename);

	*pload_priv = ppriv;

	return POM_OK;
}
Example #7
int proto_expectation_add(struct proto_expectation *e) {

	if (!e || !e->tail || !e->tail->proto) {
		pomlog(POMLOG_ERR "Cannot add expectation as it's incomplete");
		return POM_ERR;
	}

	if (e->flags & PROTO_EXPECTATION_FLAG_QUEUED)
		return POM_ERR;

	struct proto *proto = e->tail->proto;
	pom_rwlock_wlock(&proto->expectation_lock);

	__sync_fetch_and_or(&e->flags, PROTO_EXPECTATION_FLAG_QUEUED);

	e->next = proto->expectations;
	if (e->next)
		e->next->prev = e;

	proto->expectations = e;

	pom_rwlock_unlock(&proto->expectation_lock);

	registry_perf_inc(e->proto->perf_expt_pending, 1);

	return POM_OK;
}
Example #8
File: event.c Project: k0a1a/pom-ng
int event_add_listener(struct event *evt, void *obj, int (*process_begin) (struct event *evt, void *obj, struct proto_process_stack *stack, unsigned int stack_index), int (*process_end) (struct event *evt, void *obj)) {

	if (process_begin && process_begin(evt, obj, NULL, 0) != POM_OK)
		return POM_ERR;

	struct event_listener *tmp = malloc(sizeof(struct event_listener));
	if (!tmp) {
		pom_oom(sizeof(struct event_listener));
		return POM_ERR;
	}
	memset(tmp, 0, sizeof(struct event_listener));
	
	tmp->obj = obj;
	tmp->process_end = process_end;

	tmp->next = evt->tmp_listeners;
	if (tmp->next)
		tmp->next->prev = tmp;
	evt->tmp_listeners = tmp;

	registry_perf_inc(evt->reg->perf_listeners, 1);

	return POM_OK;

}
Example #9
struct packet *packet_alloc() {

    struct packet *tmp = malloc(sizeof(struct packet));
    if (!tmp) {
        pom_oom(sizeof(struct packet));
        return NULL;
    }
    memset(tmp, 0, sizeof(struct packet));

    // Init the refcount
    tmp->refcount = 1;

    registry_perf_inc(perf_pkt_in_use, 1);

    return tmp;
}
Example #10
int output_file_pload_write(void *output_priv, void *pload_instance_priv, void *data, size_t len) {

	struct output_file_priv *priv = output_priv;
	struct output_file_pload_priv *ppriv = pload_instance_priv;
	int res = pom_write(ppriv->fd, data, len);
	if (res == POM_ERR)
		pomlog(POMLOG_ERR "Error while writing to file %s : %s", ppriv->filename, pom_strerror(errno));
	else if (priv && priv->perf_bytes_written)
		registry_perf_inc(priv->perf_bytes_written, len);
		

	return res;

}
Example #11
static int output_inject_process(void *obj, struct packet *p, struct proto_process_stack *s, unsigned int stack_index) {

	struct output_inject_priv *priv = obj;

	struct proto_process_stack *stack = &s[stack_index];

 	size_t len = stack->plen;
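	// Clamp the payload to the standard Ethernet MTU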
	if (len > 1500)
		len = 1500;

	int bytes = pcap_inject(priv->p, stack->pload, len);

	if (bytes == -1) {
		pomlog(POMLOG_ERR "Error while injecting packet : %s", pcap_geterr(priv->p));
		return POM_ERR;
	}

	registry_perf_inc(priv->perf_pkts_out, 1);
	registry_perf_inc(priv->perf_bytes_out, stack->plen);


	return POM_OK;

}
Example #12
struct timer *timer_alloc(void* priv, int (*handler) (void*, ptime)) {

	struct timer *t;
	t = malloc(sizeof(struct timer));
	if (!t) {
		pom_oom(sizeof(struct timer));
		return NULL;
	}
	memset(t, 0, sizeof(struct timer));

	t->priv = priv;
	t->handler = handler;

	registry_perf_inc(perf_timer_allocated, 1);

	return t;
}
Example #13
int output_file_pload_close(void *output_priv, void *pload_instance_priv) {

	struct output_file_pload_priv *ppriv = pload_instance_priv;
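	// Save the descriptor so the file can still be closed after the private data is freed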
	int fd = ppriv->fd;
	pomlog(POMLOG_DEBUG "File %s closed", ppriv->filename);
	free(ppriv->filename);
	free(ppriv);

	struct output_file_priv *priv = output_priv;
	if (priv) {
		if (priv->perf_files_open)
			registry_perf_dec(priv->perf_files_open, 1);
		if (priv->perf_files_closed)
			registry_perf_inc(priv->perf_files_closed, 1);
	}

	return close(fd);
}
Example #14
File: event.c Project: k0a1a/pom-ng
int event_listener_register(struct event_reg *evt_reg, void *obj, int (*process_begin) (struct event *evt, void *obj, struct proto_process_stack *stack, unsigned int stack_index), int (*process_end) (struct event *evt, void *obj)) {

	struct event_listener *lst;
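	// Make sure this object isn't already listening to this event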
	for (lst = evt_reg->listeners; lst && lst->obj != obj; lst = lst->next);

	if (lst) {
		pomlog(POMLOG_ERR "Event %s is already being listened to by obj %p", evt_reg->info->name, obj);
		return POM_ERR;
	}
	
	
	lst = malloc(sizeof(struct event_listener));
	if (!lst) {
		pom_oom(sizeof(struct event_listener));
		return POM_ERR;

	}
	memset(lst, 0, sizeof(struct event_listener));
	
	lst->obj = obj;
	lst->process_begin = process_begin;
	lst->process_end = process_end;
	
	lst->next = evt_reg->listeners;
	if (lst->next)
		lst->next->prev = lst;

	evt_reg->listeners = lst;

	if (!lst->next) {
		// Got a listener now, notify
		if (evt_reg->info->listeners_notify && evt_reg->info->listeners_notify(evt_reg->info->source_obj, evt_reg, 1) != POM_OK) {
			pomlog(POMLOG_ERR "Error while notifying event object about new listener");
			evt_reg->listeners = NULL;
			free(lst);
			return POM_ERR;
		}
	}

	registry_perf_inc(evt_reg->perf_listeners, 1);
	

	return POM_OK;
}
Example #15
File: event.c Project: k0a1a/pom-ng
int event_process_end(struct event *evt) {

	debug_event("Processing event end %s", evt->reg->info->name);

	if (!(evt->flags & EVENT_FLAG_PROCESS_BEGAN)) {
		pomlog(POMLOG_ERR "Internal error: event %s processing hasn't begun", evt->reg->info->name);
		event_cleanup(evt);
		return POM_ERR;
	}

	if (evt->flags & EVENT_FLAG_PROCESS_DONE) {
		pomlog(POMLOG_ERR "Internal error: event %s has already been processed entirely", evt->reg->info->name);
		event_cleanup(evt);
		return POM_ERR;
	}


	struct event_listener *lst;
	for (lst = evt->reg->listeners; lst; lst = lst->next) {
		if (lst->process_end && lst->process_end(evt, lst->obj) != POM_OK) {
			pomlog(POMLOG_WARN "An error occurred while processing event %s", evt->reg->info->name);
		}
	}

	for (lst = evt->tmp_listeners; lst; lst = lst->next) {
		if (lst->process_end && lst->process_end(evt, lst->obj) != POM_OK) {
			pomlog(POMLOG_WARN "An error occurred while processing event %s", evt->reg->info->name);
		}
		registry_perf_dec(evt->reg->perf_listeners, 1);
	}
	
	evt->ce = NULL;

	evt->flags |= EVENT_FLAG_PROCESS_DONE;

	registry_perf_dec(evt->reg->perf_ongoing, 1);
	registry_perf_inc(evt->reg->perf_processed, 1);

	return event_refcount_dec(evt);
}
Example #16
static int proto_ipv4_conntrack_cleanup(void *ce_priv) {

	struct proto_ipv4_fragment *frag_list = ce_priv;

	while (frag_list) {
		struct proto_ipv4_fragment *f = frag_list;
		frag_list = f->next;

		if (!(f->flags & PROTO_IPV4_FLAG_PROCESSED)) {
			registry_perf_inc(perf_frags_dropped, f->count);
		}

		if (f->multipart)
			packet_multipart_cleanup(f->multipart);
		
		if (f->t)
			conntrack_timer_cleanup(f->t);
		
		free(f);

	}

	return POM_OK;
}
Example #17
int conntrack_get_unique(struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];
	if (s_prev->ce) {
		pomlog(POMLOG_ERR "conntrack_get_unique() can only be used for link protocols");
		return POM_ERR;
	}

	if (s->ce) { // This should only occur in the case that an expectation matched
		// Make sure the conntrack is locked
		int res = pthread_mutex_trylock(&s->ce->lock);
		if (res && res != EBUSY && res != EDEADLK) {
			pomlog(POMLOG_ERR "Error while locking the conntrack : %s", pom_strerror(res));
			return POM_ERR;
		}
		return POM_OK;
	}

	struct conntrack_tables *ct = s->proto->ct;
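	// Unique conntracks are all kept in slot 0 of the conntrack table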
	pom_mutex_lock(&ct->locks[0]);

	struct conntrack_list *lst = ct->table[0];

	for (lst = ct->table[0]; lst && lst->ce->parent; lst = lst->next);

	if (lst) {
		// Conntrack found
		s->ce = lst->ce;
		pom_mutex_unlock(&ct->locks[0]);
	} else {
		// Alloc the conntrack
		struct conntrack_entry *res = NULL;
		res = malloc(sizeof(struct conntrack_entry));
		if (!res) {
			pom_oom(sizeof(struct conntrack_entry));
			pom_mutex_unlock(&ct->locks[0]);
			return POM_ERR;
		}

		memset(res, 0, sizeof(struct conntrack_entry));
		res->proto = s->proto;

		if (pom_mutex_init_type(&res->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK) {
			pom_mutex_unlock(&ct->locks[0]);
			free(res);
			return POM_ERR;
		}

		// Alloc the list node
		lst = malloc(sizeof(struct conntrack_list));
		if (!lst) {
			pom_oom(sizeof(struct conntrack_list));
			pom_mutex_unlock(&ct->locks[0]);
			pthread_mutex_destroy(&res->lock);
			free(res);
			return POM_ERR;
		}
		memset(lst, 0, sizeof(struct conntrack_list));
		lst->ce = res;

		// Add the conntrack to the table
		lst->next = ct->table[0];
		if (lst->next)
			lst->next->prev = lst;
		ct->table[0] = lst;
		pom_mutex_unlock(&ct->locks[0]);
		debug_conntrack("Allocated unique conntrack %p", res);

		registry_perf_inc(s->proto->perf_conn_cur, 1);
		registry_perf_inc(s->proto->perf_conn_tot, 1);
		s->ce = res;
	}

	conntrack_lock(s->ce);
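	// Take a reference on behalf of the caller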
	s->ce->refcount++;

	struct proto_process_stack *s_next = &stack[stack_index + 1];
	s_next->direction = s->direction;

	return POM_OK;

}
Example #18
int output_log_xml_process(struct event *evt, void *obj) {

    struct output_log_xml_priv *priv = obj;
    struct event_reg_info *evt_info = event_get_info(evt);

    xmlBufferPtr buff = xmlBufferCreate();
    if (!buff) {
        pomlog(POMLOG_ERR "Error while creating the xml buffer");
        return POM_ERR;
    }

    xmlTextWriterPtr writer = xmlNewTextWriterMemory(buff, 0);
    if (!writer) {
        pomlog(POMLOG_ERR "Error while creating the xmlTextWriter");
        xmlBufferFree(buff);
        return POM_ERR;
    }

    // <event name="event_name">

    char timestamp[21] = { 0 };
    snprintf(timestamp, 20, "%"PRIu64, (uint64_t) event_get_timestamp(evt));

    if (xmlTextWriterWriteString(writer, BAD_CAST "\n") < 0 ||
            xmlTextWriterStartElement(writer, BAD_CAST "event") < 0 ||
            xmlTextWriterWriteAttribute(writer, BAD_CAST "name", BAD_CAST evt_info->name) < 0 ||
            xmlTextWriterWriteAttribute(writer, BAD_CAST "timestamp", BAD_CAST timestamp) < 0)
        goto err;

    struct data *evt_data = event_get_data(evt);

    int i;
    for (i = 0; i < evt_info->data_reg->data_count; i++) {
        if (evt_info->data_reg->items[i].flags & DATA_REG_FLAG_LIST) {
            // Got a data_list

            if (!evt_data[i].items)
                continue;

            // <data_list name="data_name">
            if (xmlTextWriterWriteString(writer, BAD_CAST "\n\t") < 0 ||
                    xmlTextWriterStartElement(writer, BAD_CAST "data_list") < 0 ||
                    xmlTextWriterWriteAttribute(writer, BAD_CAST "name", BAD_CAST evt_info->data_reg->items[i].name) < 0)
                goto err;

            // <value key="key1">
            struct data_item *itm = evt_data[i].items;
            for (; itm; itm = itm->next) {
                if (xmlTextWriterWriteString(writer, BAD_CAST "\n\t\t") < 0 ||
                        xmlTextWriterStartElement(writer, BAD_CAST "value") < 0 ||
                        xmlTextWriterWriteAttribute(writer, BAD_CAST "key", BAD_CAST itm->key) < 0)
                    goto err;

                char *value = ptype_print_val_alloc(itm->value, NULL);
                if (!value)
                    goto err;

                if (xmlTextWriterWriteString(writer, BAD_CAST value) < 0) {
                    free(value);
                    goto err;
                }

                free(value);

                // </value>
                if (xmlTextWriterEndElement(writer) < 0)
                    goto err;

            }


            // </data_list>
            if (xmlTextWriterWriteString(writer, BAD_CAST "\n\t") < 0 ||
                    xmlTextWriterEndElement(writer) < 0)
                goto err;

        } else {

            // Got a single data

            if (!data_is_set(evt_data[i]))
                continue;


            // <data name="data_name">

            if (xmlTextWriterWriteString(writer, BAD_CAST "\n\t") < 0 ||
                    xmlTextWriterStartElement(writer, BAD_CAST "data") < 0 ||
                    xmlTextWriterWriteAttribute(writer, BAD_CAST "name", BAD_CAST evt_info->data_reg->items[i].name) < 0)
                goto err;

            if (evt_data[i].value) {
                char *value = ptype_print_val_alloc(evt_data[i].value, NULL);
                if (!value)
                    goto err;

                if (xmlTextWriterWriteString(writer, BAD_CAST value) < 0) {
                    free(value);
                    goto err;
                }

                free(value);
            }

            // </data>

            if (xmlTextWriterEndElement(writer) < 0)
                goto err;
        }
    }

    // </event>
    if (xmlTextWriterWriteString(writer, BAD_CAST "\n") < 0 ||
            xmlTextWriterEndElement(writer) < 0 ||
            xmlTextWriterWriteString(writer, BAD_CAST "\n") < 0)
        goto err;

    xmlFreeTextWriter(writer);

    if (pom_write(priv->fd, buff->content, buff->use) != POM_OK) {
        pomlog(POMLOG_ERR "Error while writing to the log file");
        xmlBufferFree(buff);
        return POM_ERR;
    }

    xmlBufferFree(buff);

    if (priv->perf_events)
        registry_perf_inc(priv->perf_events, 1);

    return POM_OK;
err:
    pomlog(POMLOG_ERR "An error occurred while processing the event");
    xmlFreeTextWriter(writer);
    xmlBufferFree(buff);

    return POM_ERR;

}
Example #19
int conntrack_get_unique_from_parent(struct proto_process_stack *stack, unsigned int stack_index) {

	struct conntrack_node_list *child = NULL;
	struct conntrack_list *lst = NULL;

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];

	struct conntrack_entry *parent = s_prev->ce;

	if (!s->proto) {
		pomlog(POMLOG_ERR "Cannot allocate conntrack for NULL proto");
		return POM_ERR;
	}

	if (!parent) {
		pomlog(POMLOG_ERR "Cannot allocate unique conntrack without a parent");
		return POM_ERR;
	}


	if (s->ce) { // This should only occur in the case that an expectation matched
		// Make sure the conntrack is locked
		int res = pthread_mutex_trylock(&s->ce->lock);
		if (res && res != EBUSY && res != EDEADLK) {
			pomlog(POMLOG_ERR "Error while locking the conntrack : %s", pom_strerror(res));
			return POM_ERR;
		}
		return POM_OK;
	}

	conntrack_lock(parent);

	struct conntrack_tables *ct = s->proto->ct;
	struct conntrack_entry *res = NULL;

	// Look for the conntrack
	
	if (parent->children) {
		struct conntrack_node_list *tmp;
		for (tmp = parent->children; tmp && tmp->ce->proto != s->proto; tmp = tmp->next);
		if (tmp)
			res = tmp->ce;
	}

	if (!res) {

		// Alloc the conntrack
		res = malloc(sizeof(struct conntrack_entry));
		if (!res) {
			pom_oom(sizeof(struct conntrack_entry));
			goto err;
		}

		memset(res, 0, sizeof(struct conntrack_entry));
		res->proto = s->proto;

		if (pom_mutex_init_type(&res->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK)
			goto err;

		// Alloc the child list
		child = malloc(sizeof(struct conntrack_node_list));
		if (!child) 
			goto err;
		
		memset(child, 0, sizeof(struct conntrack_node_list));
		child->ce = res;
		child->ct = ct;

		// Alloc the parent node
		res->parent = malloc(sizeof(struct conntrack_node_list));
		if (!res->parent) {
			free(child);
			goto err;
		}
		memset(res->parent, 0, sizeof(struct conntrack_node_list));
		res->parent->ce = parent;
		res->parent->ct = parent->proto->ct;
		res->parent->hash = parent->hash;

		// Alloc the list node
		lst = malloc(sizeof(struct conntrack_list));
		if (!lst) {
			pom_oom(sizeof(struct conntrack_list));
			goto err;
		}
		memset(lst, 0, sizeof(struct conntrack_list));
		lst->ce = res;

		// Add the child to the parent
		child->next = parent->children;
		if (child->next)
			child->next->prev = child;
		parent->children = child;

		// Add the conntrack to the table
		pom_mutex_lock(&ct->locks[0]);
		lst->next = ct->table[0];
		if (lst->next)
			lst->next->prev = lst;
		ct->table[0] = lst;
		pom_mutex_unlock(&ct->locks[0]);
		debug_conntrack("Allocated conntrack %p with parent %p (uniq child)", res, parent);

		registry_perf_inc(s->proto->perf_conn_cur, 1);
		registry_perf_inc(s->proto->perf_conn_tot, 1);

	}

	conntrack_unlock(parent);

	conntrack_lock(res);
	res->refcount++;
	s->ce = res;
	s->direction = s_prev->direction;

	struct proto_process_stack *s_next = &stack[stack_index + 1];
	s_next->direction = s->direction;

	return POM_OK;

err:
	if (res) {
		pthread_mutex_destroy(&res->lock);
		free(res);
	}

	if (child)
		free(child);
	conntrack_unlock(parent);

	return POM_ERR;
}
Example #20
int proto_process(struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

    struct proto_process_stack *s = &stack[stack_index];

    struct proto *proto = s->proto;

    if (!proto || !proto->info->process)
        return PROTO_ERR;
    int res = proto->info->process(proto->priv, p, stack, stack_index);

    registry_perf_inc(proto->perf_pkts, 1);
    registry_perf_inc(proto->perf_bytes, s->plen);


    if (res != PROTO_OK)
        return res;

    // Process the expectations !
    pom_rwlock_rlock(&proto->expectation_lock);
    struct proto_expectation *e = proto->expectations;
    while (e) {

        int expt_dir = POM_DIR_UNK;

        struct proto_expectation_stack *es = e->tail;
        struct ptype *fwd_value = s->pkt_info->fields_value[s->proto->info->ct_info->fwd_pkt_field_id];
        struct ptype *rev_value = s->pkt_info->fields_value[s->proto->info->ct_info->rev_pkt_field_id];

        if ((!es->fields[POM_DIR_FWD] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value)) &&
                (!es->fields[POM_DIR_REV] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) {
            // Expectation matched the forward direction
            expt_dir = POM_DIR_FWD;
        } else if ((!es->fields[POM_DIR_FWD] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value)) &&
                   (!es->fields[POM_DIR_REV] || ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) {
            // Expectation matched the reverse direction
            expt_dir = POM_DIR_REV;
        }

        if (expt_dir == POM_DIR_UNK) {
            // Expectation not matched
            e = e->next;
            continue;
        }

        es = es->prev;
        int stack_index_tmp = stack_index - 1;
        while (es) {

            struct proto_process_stack *s_tmp = &stack[stack_index_tmp];

            if (s_tmp->proto != es->proto) {
                expt_dir = POM_DIR_UNK;
                break;
            }

            fwd_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->fwd_pkt_field_id];
            rev_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->rev_pkt_field_id];

            if (expt_dir == POM_DIR_FWD) {
                if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value)) ||
                        (es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) {
                    expt_dir = POM_DIR_UNK;
                    break;
                }
            } else {
                if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value)) ||
                        (es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) {
                    expt_dir = POM_DIR_UNK;
                    break;
                }
            }

            es = es->prev;
            stack_index_tmp--;
        }

        if (expt_dir == POM_DIR_UNK) {
            // Expectation not matched on a lower layer
            e = e->next;
            continue;
        }

        // Expectation matched !
        // Relock with write access
        pom_rwlock_unlock(&proto->expectation_lock);
        pom_rwlock_wlock(&proto->expectation_lock);

        debug_expectation("Expectation %p matched !", e);

        // Remove it from the list

        if (e->next)
            e->next->prev = e->prev;

        if (e->prev)
            e->prev->next = e->next;
        else
            proto->expectations = e->next;

        struct proto_process_stack *s_next = &stack[stack_index + 1];
        s_next->proto = e->proto;


        if (conntrack_get_unique_from_parent(stack, stack_index + 1) != POM_OK) {
            proto_expectation_cleanup(e);
            return PROTO_ERR;
        }

        s_next->ce->priv = e->priv;

        if (conntrack_session_bind(s_next->ce, e->session)) {
            proto_expectation_cleanup(e);
            return PROTO_ERR;
        }

        registry_perf_dec(e->proto->perf_expt_pending, 1);
        registry_perf_inc(e->proto->perf_expt_matched, 1);

        proto_expectation_cleanup(e);
        conntrack_unlock(s_next->ce);

        break;

    }
    pom_rwlock_unlock(&proto->expectation_lock);

    return res;
}
Example #21
File: core.c Project: k0a1a/pom-ng
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {
		
		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL;
			}
			registry_perf_inc(perf_thread_active, 1);
		}


		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;


		// Add it to the unused list
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;

		registry_perf_dec(perf_pkt_queue, 1);

		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {

			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure we keep it monotonous
			core_clock[tpriv->thread_id] = pkt->ts;

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}
		
		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);

	}

	halt("Processing thread encountered an error", 1);
end:
	packet_info_pool_cleanup();

	return NULL;
}
Example #22
File: core.c Project: k0a1a/pom-ng
int core_queue_packet(struct packet *p, unsigned int flags, unsigned int thread_affinity) {

	
	// Update the counters
	registry_perf_inc(p->input->perf_pkts_in, 1);
	registry_perf_inc(p->input->perf_bytes_in, p->len);

	if (!core_run)
		return POM_ERR;

	debug_core("Queuing packet %p (%u.%06u)", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));

	// Find the right thread to queue to

	struct core_processing_thread *t = NULL;
	if (flags & CORE_QUEUE_HAS_THREAD_AFFINITY) {
		t = core_processing_threads[thread_affinity % core_num_threads];
		pom_mutex_lock(&t->pkt_queue_lock);
	} else {
		static volatile unsigned int start = 0;
		unsigned int i;
		while (1) {
			unsigned int thread_id = start;
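			// Walk the threads round-robin, starting just after the last one used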
			for (i = 0; i < core_num_threads; i++) {
				thread_id++;
				if (thread_id >= core_num_threads)
					thread_id -= core_num_threads;
				t = core_processing_threads[thread_id];
				int res = pthread_mutex_trylock(&t->pkt_queue_lock);
				if (res == EBUSY) {
					// Thread is busy, go to the next one
					continue;
				} else if (res) {
					pomlog(POMLOG_ERR "Error while locking a processing thread pkt_queue mutex : %s", pom_strerror(res));
					abort();
					return POM_ERR;
				}

				// We've got the lock, check if it's ok to queue here
				if (t->pkt_count < CORE_THREAD_PKT_QUEUE_MAX) {
					// Use this thread
					break;
				}

				// Too many packets pending in this thread, go to the next one
				pom_mutex_unlock(&t->pkt_queue_lock);
			}

			if (i < core_num_threads) {
				// We locked on a thread
				start = thread_id;
				break;
			}

			// No thread found
			if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
				// Queue full
				if (flags & CORE_QUEUE_DROP_IF_FULL) {
					// TODO add dropped stats
					debug_core("Dropped packet %p (%u.%06u)", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));
					return POM_OK;
				}

				// We're not going to drop this. Wait then
				debug_core("All queues full. Waiting ...");
				pom_mutex_lock(&core_pkt_queue_wait_lock);

				// Recheck the count after locking
				if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
					int res = pthread_cond_wait(&core_pkt_queue_wait_cond, &core_pkt_queue_wait_lock);
					if (res) {
						pomlog(POMLOG_ERR "Error while waiting for the core pkt_queue condition : %s", pom_strerror(res));
						abort();
					}
				}
				pom_mutex_unlock(&core_pkt_queue_wait_lock);

			}
		}

	}

	// We've got the thread's lock, add it to the queue

	struct core_packet_queue *tmp = NULL;
	if (t->pkt_queue_unused) {
		tmp = t->pkt_queue_unused;
		t->pkt_queue_unused = tmp->next;
	} else {
		tmp = malloc(sizeof(struct core_packet_queue));
		if (!tmp) {
			pom_mutex_unlock(&t->pkt_queue_lock);
			pom_oom(sizeof(struct core_packet_queue));
			return POM_ERR;
		}
	}

	tmp->pkt = p;
	tmp->next = NULL;
	if (t->pkt_queue_tail) {
		t->pkt_queue_tail->next = tmp;
	} else {
		t->pkt_queue_head = tmp;

		// The queue was empty, we need to signal it
		int res = pthread_cond_signal(&t->pkt_queue_cond);
		if (res) {
			pomlog(POMLOG_ERR "Error while signaling the thread pkt_queue restart condition : %s", pom_strerror(res));
			abort();
			return POM_ERR;
		}

	}
	t->pkt_queue_tail = tmp;

	t->pkt_count++;
	__sync_fetch_and_add(&core_pkt_queue_count, 1);

	registry_perf_inc(perf_pkt_queue, 1);

	debug_core("Queued packet %p (%u.%06u) to thread %u", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts), t->thread_id);

	pom_mutex_unlock(&t->pkt_queue_lock);

	return POM_OK;
}
Example #23
int conntrack_get(struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];
	struct proto_process_stack *s_next = &stack[stack_index + 1];

	if (s->ce)
		return POM_OK;
		
	if (!s->proto || !s->proto->info->ct_info)
		return POM_ERR;

	struct ptype *fwd_value = s->pkt_info->fields_value[s->proto->info->ct_info->fwd_pkt_field_id];
	if (!fwd_value)
		return POM_ERR;

	struct ptype *rev_value = NULL;
	if (s->proto->info->ct_info->rev_pkt_field_id != CONNTRACK_PKT_FIELD_NONE) {
		rev_value = s->pkt_info->fields_value[s->proto->info->ct_info->rev_pkt_field_id];
		if (!rev_value)
			return POM_ERR;
	}

	struct conntrack_tables *ct = s->proto->ct;

	uint32_t hash = conntrack_hash(fwd_value, rev_value, s_prev->ce) % ct->table_size;

	// Lock the specific hash while browsing for a conntrack
	pom_mutex_lock(&ct->locks[hash]);

	// Try to find the conntrack in the forward table

	// Check if we can find this entry in the forward way
	if (ct->table[hash]) {
		s->ce = conntrack_find(ct->table[hash], fwd_value, rev_value, s_prev->ce);
		if (s->ce) {
			s->direction = POM_DIR_FWD;
			s_next->direction = POM_DIR_FWD;
			pom_mutex_lock(&s->ce->lock);
			s->ce->refcount++;
			pom_mutex_unlock(&ct->locks[hash]);
			return POM_OK;
		}
	}


	// It wasn't found in the forward way, maybe in the reverse direction ?
	if (rev_value) {
		s->ce = conntrack_find(ct->table[hash], rev_value, fwd_value, s_prev->ce);
		if (s->ce) {
			s->direction = POM_DIR_REV;
			s_next->direction = POM_DIR_REV;
			pom_mutex_lock(&s->ce->lock);
			s->ce->refcount++;
			pom_mutex_unlock(&ct->locks[hash]);
			return POM_OK;
		}

	}

	// It's not found in the reverse direction either, let's create it then

	if (s_prev->direction == POM_DIR_REV && rev_value) {
		// This indicates that the parent conntrack matched in a reverse direction
		// Let's keep directions consistent and swap fwd and rev values
		struct ptype *tmp = rev_value;
		rev_value = fwd_value;
		fwd_value = tmp;
	}


	// Alloc the conntrack entry
	struct conntrack_entry *ce = malloc(sizeof(struct conntrack_entry));
	if (!ce) {
		pom_mutex_unlock(&ct->locks[hash]);
		pom_oom(sizeof(struct conntrack_entry));
		return POM_ERR;
	}
	memset(ce, 0, sizeof(struct conntrack_entry));

	if (pom_mutex_init_type(&ce->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK) {
		pom_mutex_unlock(&ct->locks[hash]);
		free(ce);
		return POM_ERR;
	}

	struct conntrack_node_list *child = NULL;

	// We shouldn't have to check if the parent still exists as it
	// is supposed to have a refcount since conntrack_get is called after
	// the parent's conntrack_get was called and before conntrack_refcount_dec
	// was called by core_process_stack.
	if (s_prev->ce) {

		child = malloc(sizeof(struct conntrack_node_list));
		if (!child) {
			pthread_mutex_destroy(&ce->lock);
			pom_mutex_unlock(&ct->locks[hash]);
			free(ce);
			pom_oom(sizeof(struct conntrack_node_list));
			return POM_ERR;
		}
		memset(child, 0, sizeof(struct conntrack_node_list));

		child->ce = ce;
		child->ct = s->proto->ct;
		child->hash = hash;

		ce->parent = malloc(sizeof(struct conntrack_node_list));
		if (!ce->parent) {
			pthread_mutex_destroy(&ce->lock);
			pom_mutex_unlock(&ct->locks[hash]);
			free(child);
			free(ce);
			pom_oom(sizeof(struct conntrack_node_list));
			return POM_ERR;
		}
		ce->parent->ce = s_prev->ce;
		ce->parent->ct = s_prev->ce->proto->ct;
		ce->parent->hash = s_prev->ce->hash;

	}

	ce->proto = s->proto;

	ce->hash = hash;

	struct conntrack_list *lst = NULL;

	ce->fwd_value = ptype_alloc_from(fwd_value);
	if (!ce->fwd_value)
		goto err;

	if (rev_value) {
		ce->rev_value = ptype_alloc_from(rev_value);
		if (!ce->rev_value)
			goto err;
	}
	// Alloc the list node
	lst = malloc(sizeof(struct conntrack_list));
	if (!lst) {
		pom_oom(sizeof(struct conntrack_list));
		goto err;
	}
	memset(lst, 0, sizeof(struct conntrack_list));
	lst->ce = ce;

	// Insert in the conntrack table
	lst->next = ct->table[hash];
	if (lst->next) {
		lst->next->prev = lst;
		registry_perf_inc(s->proto->perf_conn_hash_col, 1);
	}
	ct->table[hash] = lst;

	// Add the child to the parent if any
	if (child) {
		pom_mutex_lock(&s_prev->ce->lock);
		if (!s_prev->ce->refcount)
			pomlog(POMLOG_WARN "Internal error, the parent is supposed to have a refcount > 0");
		child->next = s_prev->ce->children;
		if (child->next)
			child->next->prev = child;
		s_prev->ce->children = child;
		pom_mutex_unlock(&s_prev->ce->lock);
	}

	// Unlock the table
	if (s_prev->ce) {
		debug_conntrack("Allocated conntrack %p with parent %p", ce, s_prev->ce);
	} else {
		debug_conntrack("Allocated conntrack %p with no parent", ce);
	}
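	// Hand the new conntrack back locked and with a reference held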
	pom_mutex_lock(&ce->lock);
	ce->refcount++;
	pom_mutex_unlock(&ct->locks[hash]);

	s->ce = ce;
	s->direction = s_prev->direction;

	// Propagate the direction to the payload as well
	s_next->direction = s->direction;
	
	registry_perf_inc(ce->proto->perf_conn_cur, 1);
	registry_perf_inc(ce->proto->perf_conn_tot, 1);

	return POM_OK;

err:
	pom_mutex_unlock(&ct->locks[hash]);

	pthread_mutex_destroy(&ce->lock);
	if (child)
		free(child);

	if (lst)
		free(lst);

	if (ce->parent)
		free(ce->parent);
	
	if (ce->fwd_value)
		ptype_cleanup(ce->fwd_value);

	if (ce->rev_value)
		ptype_cleanup(ce->rev_value);

	free(ce);

	return POM_ERR;
}
Example #24
static int proto_ipv4_process(void *proto_priv, struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_next = &stack[stack_index + 1];

	struct ip* hdr = s->pload;

	unsigned int hdr_len = hdr->ip_hl * 4;

	if (s->plen < sizeof(struct ip) || // length smaller than header
		hdr->ip_hl < 5 || // ip header < 5 bytes
		ntohs(hdr->ip_len) < hdr_len || // datagram size < ip header length
		ntohs(hdr->ip_len) > s->plen) { // datagram size > given size
		return PROTO_INVALID;
	}


	PTYPE_IPV4_SETADDR(s->pkt_info->fields_value[proto_ipv4_field_src], hdr->ip_src);
	PTYPE_IPV4_SETADDR(s->pkt_info->fields_value[proto_ipv4_field_dst], hdr->ip_dst);
	PTYPE_UINT8_SETVAL(s->pkt_info->fields_value[proto_ipv4_field_tos], hdr->ip_tos);
	PTYPE_UINT8_SETVAL(s->pkt_info->fields_value[proto_ipv4_field_ttl], hdr->ip_ttl);

	// Handle conntrack stuff
	if (conntrack_get(stack, stack_index) != POM_OK)
		return PROTO_ERR;


	s_next->pload = s->pload + hdr_len;
	s_next->plen = ntohs(hdr->ip_len) - hdr_len;

	s_next->proto = proto_get_by_number(s->proto, hdr->ip_p);


	int res = POM_ERR;
	if (s->ce->children) {
		res = conntrack_delayed_cleanup(s->ce, 0, p->ts);
	} else {
		uint32_t *conntrack_timeout = PTYPE_UINT32_GETVAL(param_conntrack_timeout);
		res = conntrack_delayed_cleanup(s->ce, *conntrack_timeout, p->ts);
	}
	if (res == POM_ERR) {
		conntrack_unlock(s->ce);
		return PROTO_ERR;
	}

	uint16_t frag_off = ntohs(hdr->ip_off);

	// Check if packet is fragmented and need more handling

	if (frag_off & IP_DONT_FRAG) {
		conntrack_unlock(s->ce);
		return PROTO_OK; // Nothing to do
	}

	if (!(frag_off & IP_MORE_FRAG) && !(frag_off & IP_OFFSET_MASK)) {
		conntrack_unlock(s->ce);
		return PROTO_OK; // Nothing to do, full packet
	}

	uint16_t offset = (frag_off & IP_OFFSET_MASK) << 3;
	size_t frag_size = ntohs(hdr->ip_len) - (hdr->ip_hl * 4);

	// Ignore invalid fragments
	if (frag_size > 0xFFFF) {
		conntrack_unlock(s->ce);
		return PROTO_INVALID;
	}

	if (frag_size > s->plen + hdr_len) {
		conntrack_unlock(s->ce);
		return PROTO_INVALID;
	}

	// Account for one more fragment
	registry_perf_inc(perf_frags, 1);

	struct proto_ipv4_fragment *tmp = s->ce->priv;

	// Let's find the right buffer
	for (; tmp && tmp->id != hdr->ip_id; tmp = tmp->next);

	if (!tmp) {
		// Buffer not found, create it
		tmp = malloc(sizeof(struct proto_ipv4_fragment));
		if (!tmp) {
			pom_oom(sizeof(struct proto_ipv4_fragment));
			conntrack_unlock(s->ce);
			return PROTO_ERR;
		}
		memset(tmp, 0, sizeof(struct proto_ipv4_fragment));

		tmp->t = conntrack_timer_alloc(s->ce, proto_ipv4_fragment_cleanup, tmp);
		if (!tmp->t) {
			conntrack_unlock(s->ce);
			free(tmp);
			return PROTO_ERR;
		}
		
		tmp->id = hdr->ip_id;

		if (!s_next->proto) {
			// Set processed flag so no attempt to process this will be done
			tmp->flags |= PROTO_IPV4_FLAG_PROCESSED;
			conntrack_unlock(s->ce);
			conntrack_timer_cleanup(tmp->t);
			free(tmp);
			return PROTO_STOP;
		}

		tmp->multipart = packet_multipart_alloc(s_next->proto, 0);
		if (!tmp->multipart) {
			conntrack_unlock(s->ce);
			conntrack_timer_cleanup(tmp->t);
			free(tmp);
			return PROTO_ERR;
		}

		tmp->next = s->ce->priv;
		if (tmp->next)
			tmp->next->prev = tmp;
		s->ce->priv = tmp;
	}

	// Fragment was already handled
	if (tmp->flags & PROTO_IPV4_FLAG_PROCESSED) {
		conntrack_unlock(s->ce);
		registry_perf_inc(perf_frags_dropped, 1);
		return PROTO_STOP;
	}
	
	// Add the fragment
	if (packet_multipart_add_packet(tmp->multipart, p, offset, frag_size, (s->pload - (void*)p->buff) + (hdr->ip_hl * 4)) != POM_OK) {
		conntrack_unlock(s->ce);
		packet_multipart_cleanup(tmp->multipart);
		conntrack_timer_cleanup(tmp->t);
		free(tmp);
		return PROTO_ERR;
	}
	tmp->count++;

	// Schedule the timeout for the fragment
	uint32_t *frag_timeout = PTYPE_UINT32_GETVAL(param_frag_timeout);
	conntrack_timer_queue(tmp->t, *frag_timeout, p->ts);


	if (!(frag_off & IP_MORE_FRAG))
		tmp->flags |= PROTO_IPV4_FLAG_GOT_LAST;

	if ((tmp->flags & PROTO_IPV4_FLAG_GOT_LAST) && !tmp->multipart->gaps)
		tmp->flags |= PROTO_IPV4_FLAG_PROCESSED;


	conntrack_unlock(s->ce);
	
	if ((tmp->flags & PROTO_IPV4_FLAG_PROCESSED)) {
		int res = packet_multipart_process(tmp->multipart, stack, stack_index + 1);
		tmp->multipart = NULL; // Multipart will be cleared automatically
		if (res == PROTO_ERR) {
			return PROTO_ERR;
		} else if (res == PROTO_INVALID) {
			registry_perf_inc(perf_frags_dropped, tmp->count);
		} else {
			registry_perf_inc(perf_reassembled_pkts, 1);
		}
	}

	return PROTO_STOP; // Stop processing the packet

}
Example #25
int timer_queue_now(struct timer *t, unsigned int expiry, ptime now) {


	pom_mutex_lock(&timer_main_lock);

	// Timer is still queued, dequeue it
	if (t->queue) {
		if (t->prev) {
			t->prev->next = t->next;
		} else {
			t->queue->head = t->next;
			if (t->queue->head)
				t->queue->head->prev = NULL;
		}

		if (t->next) {
			t->next->prev = t->prev;
		} else {
			t->queue->tail = t->prev;
			if (t->queue->tail)
				t->queue->tail->next = NULL;
			
		}
		t->queue = NULL;
		t->prev = NULL;
		t->next = NULL;
	} else {
		registry_perf_inc(perf_timer_queued, 1);
	}

	struct timer_queue *tq = timer_queues;

	// First find the right queue or create it
	
	if (!tq) {

		// There is no queue yet
		tq = malloc(sizeof(struct timer_queue));
		if (!tq) {
			pom_mutex_unlock(&timer_main_lock);
			pom_oom(sizeof(struct timer_queue));
			return POM_ERR;
		}
		memset(tq, 0, sizeof(struct timer_queue));
		timer_queues = tq;

		tq->expiry = expiry;

		registry_perf_inc(perf_timer_queues, 1);

	} else {

		while (tq) {
			
			if (tq->expiry == expiry) { // The right queue already exists
				
				break;

			} else if (tq->expiry > expiry) { // The right queue doesn't exists and we are too far in the list

				struct timer_queue *tmp;
				tmp = malloc(sizeof(struct timer_queue));
				if (!tmp) {
					pom_oom(sizeof(struct timer_queue));
					pom_mutex_unlock(&timer_main_lock);
					return POM_ERR;
				}
				memset(tmp, 0, sizeof(struct timer_queue));

				tmp->prev = tq->prev;
				tmp->next = tq;
				tq->prev = tmp;

				if (tmp->prev)
					tmp->prev->next = tmp;
				else
					timer_queues = tmp;


				tq = tmp;
				tq->expiry = expiry;

				registry_perf_inc(perf_timer_queues, 1);

				break;
			
			} else if (!tq->next) { // Looks like we are at the end of our list

				struct timer_queue *tmp;
				tmp = malloc(sizeof(struct timer_queue));
				if (!tmp) {
					pom_oom(sizeof(struct timer_queue));
					pom_mutex_unlock(&timer_main_lock);
					return POM_ERR;
				}
				memset(tmp, 0, sizeof(struct timer_queue));

				tmp->prev = tq;
				
				tq->next = tmp;

				tq = tmp;

				tq->expiry = expiry;

				registry_perf_inc(perf_timer_queues, 1);
				
				break;
			}

			tq = tq->next;
		}

	}

	// Now we can queue the timer
	
	if (tq->head == NULL) {
		tq->head = t;
		tq->tail = t;
	} else {
		t->prev = tq->tail;
		tq->tail = t;
		t->prev->next = t;
	}

	// Update the expiry time
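	// (expiry is given in seconds, ptime is in microseconds)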
	
	t->expires = now + (expiry * 1000000UL);
	t->queue = tq;
	pom_mutex_unlock(&timer_main_lock);


	return POM_OK;
}
Example #26
int timers_process() {


	static int processing = 0;

	int res = pthread_mutex_trylock(&timer_main_lock);
	if (res == EBUSY) {
		// Already locked, give up
		return POM_OK;
	} else if (res) {
		// Something went wrong
		pomlog(POMLOG_ERR "Error while trying to lock the main timer lock : %s", pom_strerror(res));
		abort();
		return POM_ERR;
	}

	// Another thread is already processing the timers, drop out
	if (processing) {
		pom_mutex_unlock(&timer_main_lock);
		return POM_OK;
	}

	processing = 1;

	ptime now = core_get_clock();

	struct timer_queue *tq;
	tq = timer_queues;

	while (tq) {
		while (tq->head && (tq->head->expires < now)) {
				
			// Dequeue the timer
			struct timer *tmp = tq->head;
			tq->head = tq->head->next;
			if (tq->head)
				tq->head->prev = NULL;
			else
				tq->tail = NULL;

			tmp->next = NULL;
			tmp->prev = NULL;
			tmp->queue = NULL;
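			// Release the main lock while the handler runs so it can requeue timers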
			pom_mutex_unlock(&timer_main_lock);
			registry_perf_dec(perf_timer_queued, 1);

			// Process it
			debug_timer( "Timer 0x%lx reached. Starting handler ...", (unsigned long) tmp);
			if ((*tmp->handler) (tmp->priv, now) != POM_OK) {
				return POM_ERR;
			}

			registry_perf_inc(perf_timer_processed, 1);

			pom_mutex_lock(&timer_main_lock);

		}
		tq = tq->next;

	}

	processing = 0;

	pom_mutex_unlock(&timer_main_lock);

	return POM_OK;
}
Example #27
int proto_process(struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];

	struct proto *proto = s->proto;

	if (!proto || !proto->info->process)
		return PROTO_ERR;
	int res = proto->info->process(proto->priv, p, stack, stack_index);

	registry_perf_inc(proto->perf_pkts, 1);
	registry_perf_inc(proto->perf_bytes, s->plen);

	if (res != PROTO_OK)
		return res;

	int matched = 0;

	// Process the expectations !
	pom_rwlock_rlock(&proto->expectation_lock);
	struct proto_expectation *e = NULL;
	for (e = proto->expectations; e; e = e->next) {

		if (e->flags & PROTO_EXPECTATION_FLAG_MATCHED) {
			// Another thread already matched the expectation, continue
			continue;
		}
		
		// Bit one means it matches the forward direction
		// Bit two means it matches the reverse direction

		int expt_dir = 3;

		struct proto_expectation_stack *es = e->tail;
		int stack_index_tmp = stack_index;
		while (es) {

			struct proto_process_stack *s_tmp = &stack[stack_index_tmp];

			if (s_tmp->proto != es->proto) {
				expt_dir = 0;
				break;
			}

			struct ptype *fwd_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->fwd_pkt_field_id];
			struct ptype *rev_value = s_tmp->pkt_info->fields_value[s_tmp->proto->info->ct_info->rev_pkt_field_id];

			if (expt_dir & 1) {
				if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], fwd_value)) ||
					(es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], rev_value))) {
					expt_dir &= ~1; // It doesn't match in the forward direction
				}
			}

			if (expt_dir & 2) {
				if ((es->fields[POM_DIR_FWD] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_FWD], rev_value)) ||
					(es->fields[POM_DIR_REV] && !ptype_compare_val(PTYPE_OP_EQ, es->fields[POM_DIR_REV], fwd_value))) {
					expt_dir &= ~2;
				}
			}

			if (!expt_dir)
				break;

			es = es->prev;
			stack_index_tmp--;
		}

		if (expt_dir) {
			// It matched
			if (!(__sync_fetch_and_or(&e->flags, PROTO_EXPECTATION_FLAG_MATCHED) & PROTO_EXPECTATION_FLAG_MATCHED)) {
				// Something matched
				matched++;
			}
		}
	}
	pom_rwlock_unlock(&proto->expectation_lock);

	if (!matched)
		return POM_OK;

	// At least one expectation matched !
	debug_expectation("%u expectation matched !", matched);

	// Relock with write access
	pom_rwlock_wlock(&proto->expectation_lock);
	e = proto->expectations;
	while (e) {

		struct proto_expectation *cur = e;
		e = e->next;

		if (!(cur->flags & PROTO_EXPECTATION_FLAG_MATCHED))
			continue;

		// Remove the expectation from the conntrack
		if (cur->next)
			cur->next->prev = cur->prev;
		if (cur->prev)
			cur->prev->next = cur->next;
		else
			proto->expectations = cur->next;

		// Remove matched and queued flags
		__sync_fetch_and_and(&cur->flags, ~(PROTO_EXPECTATION_FLAG_MATCHED | PROTO_EXPECTATION_FLAG_QUEUED));

		struct proto_process_stack *s_next = &stack[stack_index + 1];
		s_next->proto = cur->proto;

		if (conntrack_get_unique_from_parent(stack, stack_index + 1) != POM_OK) {
			proto_expectation_cleanup(cur);
			continue;
		}

		if (!s_next->ce->priv) {
			s_next->ce->priv = cur->priv;
			// Prevent cleanup of private data while cleaning the expectation
			cur->priv = NULL;
		}


		if (cur->session) {
			if (conntrack_session_bind(s_next->ce, cur->session)) {
				proto_expectation_cleanup(cur);
				continue;
			}
		}

		registry_perf_dec(cur->proto->perf_expt_pending, 1);
		registry_perf_inc(cur->proto->perf_expt_matched, 1);

		if (cur->match_callback) {
			// Call the callback with the conntrack locked
			cur->match_callback(cur, cur->callback_priv, s_next->ce);
			// Nullify callback_priv so it doesn't get cleaned up
			cur->callback_priv = NULL;
		}

		if (cur->expiry) {
			// The expectation was added using 'add_and_cleanup' function
			proto_expectation_cleanup(cur);
		}

		conntrack_unlock(s_next->ce);

	}
	pom_rwlock_unlock(&proto->expectation_lock);


	return res;
}