Code Example #1
File: core.c (project: Astalaseven/pom-ng)
void core_wait_state(enum core_state state) {
	pom_mutex_lock(&core_state_lock);
	while (core_cur_state != state) {
		int res = pthread_cond_wait(&core_state_cond, &core_state_lock);
		if (res) {
			pomlog(POMLOG_ERR "Error while waiting for core cond : %s", pom_strerror(res));
			abort();
			break;
		}
	}
	pom_mutex_unlock(&core_state_lock);
}
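
Every example in this collection relies on pom_mutex_lock()/pom_mutex_unlock(), whose definitions are not included here. The sketch below is an assumption about what such wrappers typically look like: thin layers over pthread_mutex_lock()/pthread_mutex_unlock() that treat any failure as fatal, which is why the callers above never check a return value.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only (not the actual pom-ng source): a failed lock or unlock is
 * logged and treated as fatal, so callers can use the wrappers unconditionally. */
static inline void pom_mutex_lock(pthread_mutex_t *m) {
	int res = pthread_mutex_lock(m);
	if (res) {
		fprintf(stderr, "Error while locking mutex : %s\n", strerror(res));
		abort();
	}
}

static inline void pom_mutex_unlock(pthread_mutex_t *m) {
	int res = pthread_mutex_unlock(m);
	if (res) {
		fprintf(stderr, "Error while unlocking mutex : %s\n", strerror(res));
		abort();
	}
}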
Code Example #2
static int analyzer_docsis_cm_timeout(void *cable_modem, ptime now) {

	struct analyzer_docsis_cm *cm = cable_modem;
	struct analyzer_docsis_priv *priv = cm->analyzer->priv;

	pom_mutex_lock(&priv->lock);
	analyzer_docsis_reg_status_update(priv, cm, docsis_mmt_rng_status_unknown, now, NULL, 0);
	pom_mutex_unlock(&priv->lock);

	return POM_OK;
}
Code Example #3
File: timer.c (project: Astalaseven/pom-ng)
int timer_dequeue(struct timer *t) {

	// First let's check if it's the one at the beginning of the queue

	pom_mutex_lock(&timer_main_lock);

	if (!t->queue) {
		pomlog(POMLOG_WARN "Warning, timer %p was already dequeued", t);
		pom_mutex_unlock(&timer_main_lock);
		return POM_OK;
	}

	if (t->prev) {
		t->prev->next = t->next;
	} else {
		t->queue->head = t->next;
		if (t->queue->head)
			t->queue->head->prev = NULL;
	}

	if (t->next) {
		t->next->prev = t->prev;
	} else {
		t->queue->tail = t->prev;
		if (t->queue->tail)
			t->queue->tail->next = NULL;
		
	}


	// Make sure this timer will not reference anything

	t->prev = NULL;
	t->next = NULL;
	t->queue = NULL;
	pom_mutex_unlock(&timer_main_lock);

	registry_perf_dec(perf_timer_queued, 1);

	return POM_OK;
}
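
The unlink logic above only makes sense against the timer structures it manipulates, which are not part of this excerpt. The following is a reconstruction of their likely shape; the field names are taken from the code in examples #3, #27 and #29, everything else is assumed.

#include <stdint.h>

typedef uint64_t ptime; /* assumed: a timestamp in microseconds */

struct timer_queue;

struct timer {
	ptime expires;                   /* absolute expiry time, set when queued */
	void *priv;                      /* argument passed to the handler */
	int (*handler)(void *priv, ptime now);
	struct timer *prev, *next;       /* doubly linked list within one queue */
	struct timer_queue *queue;       /* owning queue, NULL once dequeued */
};

struct timer_queue {
	unsigned int expiry;             /* timeout in seconds shared by all timers in this queue */
	struct timer *head, *tail;       /* timers in insertion order */
	struct timer_queue *prev, *next; /* queues sorted by increasing expiry */
};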
Code Example #4
File: core.c (project: gmsoft-tuxicoman/pom-ng)
int core_process_dump_info(struct proto_process_stack *s, struct packet *p, int res) {

	char *res_str = "unknown result code";
	switch (res) {
		case PROTO_OK:
			res_str = "processed ok";
			break;
		case PROTO_INVALID:
			res_str = "invalid packet";
			break;
		case PROTO_STOP:
			res_str = "processing stopped";
			break;
		case PROTO_ERR:
			res_str = "processing encountered an error";
			break;
	}

	static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER;

	pom_mutex_lock(&debug_lock);
	printf("thread %u | %u.%u | ", (unsigned int)pthread_self(), (int)pom_ptime_sec(p->ts), (int)pom_ptime_usec(p->ts));

	// Dump packet info
	int i;	
	for (i = 1; i < CORE_PROTO_STACK_MAX - 1 && s[i].proto; i++) {
		printf("%s { ", s[i].proto->info->name);
	
		char buff[256];

		if (s[i].pkt_info) {

			if (s[i].proto->info->pkt_fields) {
				int j;
				for (j = 0; s[i].proto->info->pkt_fields[j].name; j++) {
					ptype_print_val(s[i].pkt_info->fields_value[j], buff, sizeof(buff) - 1, NULL);
					printf("%s: %s; ", s[i].proto->info->pkt_fields[j].name, buff);
				}
			}
		} else {
			printf("pkt_info missing ");
		}

		printf("}; ");
	}
	printf(": %s\n", res_str);
	pom_mutex_unlock(&debug_lock);

	return POM_OK;
}
Code Example #5
File: packet.c (project: dkarametos/pom-ng)
struct packet *packet_pool_get() {

	pom_mutex_lock(&packet_list_mutex);

	struct packet *tmp = packet_unused_head;

	if (!tmp) {
		// Alloc a new packet
		tmp = malloc(sizeof(struct packet));
		if (!tmp) {
			pom_mutex_unlock(&packet_list_mutex);
			pom_oom(sizeof(struct packet));
			return NULL;
		}
	} else {
		// Fetch it from the unused pool
		packet_unused_head = tmp->next;
		if (packet_unused_head)
			packet_unused_head->prev = NULL;
	}

	memset(tmp, 0, sizeof(struct packet));

	// Add the packet to the used pool
	tmp->next = packet_head;
	if (tmp->next)
		tmp->next->prev = tmp;
	
	packet_head = tmp;

	tmp->refcount = 1;
	
	pom_mutex_unlock(&packet_list_mutex);

	return tmp;
}
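
The matching release path is not part of this collection. The sketch below is a simplified guess at its list bookkeeping only (the real packet_pool_release() in pom-ng also has to free the packet buffer and any per-packet state): drop the reference under the same mutex and, once it reaches zero, move the node from the used list back to the unused list.

/* Simplified sketch, not the actual implementation. */
int packet_pool_release(struct packet *p) {

	pom_mutex_lock(&packet_list_mutex);

	if (--p->refcount) {
		// Still referenced by someone else
		pom_mutex_unlock(&packet_list_mutex);
		return POM_OK;
	}

	// Unlink from the used list
	if (p->prev)
		p->prev->next = p->next;
	else
		packet_head = p->next;
	if (p->next)
		p->next->prev = p->prev;

	// Push back onto the unused list
	p->prev = NULL;
	p->next = packet_unused_head;
	if (p->next)
		p->next->prev = p;
	packet_unused_head = p;

	pom_mutex_unlock(&packet_list_mutex);

	return POM_OK;
}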
Code Example #6
int addon_output_close(void *output_priv) {

	struct addon_instance_priv *p = output_priv;

	pom_mutex_lock(&p->lock);
	lua_getfield(p->L, LUA_REGISTRYINDEX, ADDON_INSTANCE); // Stack : self
	lua_getfield(p->L, -1, "close"); // Stack : self, close_func

	lua_pushvalue(p->L, -2); // Stack : self, close_func, self

	int res =  addon_pcall(p->L, 1, 0); // Stack : self
	
	lua_pop(p->L, 1); // Stack : empty
	pom_mutex_unlock(&p->lock);

	return res;
}
Code Example #7
File: core.c (project: dkarametos/pom-ng)
void core_get_clock(struct timeval *now) {

	pom_mutex_lock(&core_clock_lock);

	memcpy(now, &core_clock[0], sizeof(struct timeval));

	// Take only the least recent time
	int i;
	for (i = 1; i < core_num_threads; i++) {
		if ((now->tv_sec > core_clock[i].tv_sec) ||
			((now->tv_sec == core_clock[i].tv_sec) && (now->tv_usec > core_clock[i].tv_usec))) {
			memcpy(now, &core_clock[i], sizeof(struct timeval));
		}
	}

	pom_mutex_unlock(&core_clock_lock);

}
Code Example #8
File: timer.c (project: gmsoft-tuxicoman/pom-ng)
int timer_sys_cleanup(struct timer_sys *t) {

	pom_mutex_lock(&timer_sys_lock);
	if (t->prev || t->next || timer_sys_head == t) {
		if (t->prev)
			t->prev->next = t->next;
		else
			timer_sys_head = t->next;
		if (t->next)
			t->next->prev = t->prev;
		else
			timer_sys_tail = t->prev;
	}
	pom_mutex_unlock(&timer_sys_lock);

	free(t);
	return POM_OK;
}
Code Example #9
File: timer.c (project: gmsoft-tuxicoman/pom-ng)
int timer_sys_dequeue(struct timer_sys *t) {

	pom_mutex_lock(&timer_sys_lock);
	if (t->prev || t->next || timer_sys_head == t) {
		if (t->prev)
			t->prev->next = t->next;
		else
			timer_sys_head = t->next;
		if (t->next)
			t->next->prev = t->prev;
		else
			timer_sys_tail = t->prev;
		t->prev = NULL;
		t->next = NULL;
	}
	pom_mutex_unlock(&timer_sys_lock);
	return POM_OK;
}
Code Example #10
File: conntrack.c (project: elfixit/pom-ng)
int conntrack_timer_cleanup(struct conntrack_timer *t) {

#ifdef DEBUG_CONNTRACK
	int res = pthread_mutex_lock(&t->ce->lock);

	if (!res) {
		pomlog(POMLOG_ERR "Internal error, conntrack not locked when timer cleaned up");
		pom_mutex_unlock(&t->ce->lock);
	} else if (res != EDEADLK) {
		pomlog(POMLOG_ERR "Error while locking timer lock : %s", pom_strerror(errno));
		abort();
	}
#endif

	timer_cleanup(t->timer);
	free(t);
	return POM_OK;

}
Code Example #11
File: registry.c (project: k0a1a/pom-ng)
void registry_perf_reset(struct registry_perf *p) {

	if (p->type == registry_perf_type_gauge)
		return;

	if (p->update_hook) {
		pom_mutex_lock(&p->hook_lock);
		p->value = 0;
		pom_mutex_unlock(&p->hook_lock);
	} else if (p->type == registry_perf_type_timeticks) {
		uint64_t running = p->value & REGISTRY_PERF_TIMETICKS_STARTED;
		if (running) {
			p->value = pom_gettimeofday() + REGISTRY_PERF_TIMETICKS_STARTED;
		} else {
			p->value = 0;
		}

	} else {
		p->value = 0;
	}
}
Code Example #12
File: xmlrpccmd.c (project: Astalaseven/pom-ng)
xmlrpc_value *xmlrpccmd_core_serial_poll(xmlrpc_env * const envP, xmlrpc_value * const paramArrayP, void * const userData) {


	uint32_t last_serial = 0;
	xmlrpc_decompose_value(envP, paramArrayP, "(i)", &last_serial);
	if (envP->fault_occurred)
		return NULL;
	

	pom_mutex_lock(&xmlrpccmd_serial_lock);
	if (last_serial == xmlrpccmd_serial) {
		// Wait for update
		int res = pthread_cond_wait(&xmlrpccmd_serial_cond, &xmlrpccmd_serial_lock);
		if (res) {
			xmlrpc_faultf(envP, "Error while waiting for serial condition : %s", pom_strerror(res));
			abort();
			return NULL;
		}
	
	}

	last_serial = xmlrpccmd_serial;
	pom_mutex_unlock(&xmlrpccmd_serial_lock);

	registry_lock();
	pomlog_rlock();

	struct pomlog_entry *last_log = pomlog_get_tail();
	
	xmlrpc_value *res = xmlrpc_build_value(envP, "{s:i,s:i,s:i}",
						"main", last_serial,
						"registry", registry_serial_get(),
						"log", last_log->id);

	pomlog_unlock();
	registry_unlock();

	return res;

}
Code Example #13
File: output.c (project: Astalaseven/pom-ng)
int output_cleanup() {
	
	pom_mutex_lock(&output_lock);

	if (output_registry_class)
		registry_remove_class(output_registry_class);
	output_registry_class = NULL;

	while (output_reg_head) {

		struct output_reg *tmp = output_reg_head;
		output_reg_head = tmp->next;

		mod_refcount_dec(tmp->reg_info->mod);

		free(tmp);
	}

	pom_mutex_unlock(&output_lock);

	return POM_OK;

}
Code Example #14
File: core.c (project: k0a1a/pom-ng)
int core_queue_packet(struct packet *p, unsigned int flags, unsigned int thread_affinity) {

	
	// Update the counters
	registry_perf_inc(p->input->perf_pkts_in, 1);
	registry_perf_inc(p->input->perf_bytes_in, p->len);

	if (!core_run)
		return POM_ERR;

	debug_core("Queuing packet %p (%u.%06u)", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));

	// Find the right thread to queue to

	struct core_processing_thread *t = NULL;
	if (flags & CORE_QUEUE_HAS_THREAD_AFFINITY) {
		t = core_processing_threads[thread_affinity % core_num_threads];
		pom_mutex_lock(&t->pkt_queue_lock);
	} else {
		static volatile unsigned int start = 0;
		unsigned int i;
		while (1) {
			unsigned int thread_id = start;
			for (i = 0; i < core_num_threads; i++) {
				thread_id++;
				if (thread_id >= core_num_threads)
					thread_id -= core_num_threads;
				t = core_processing_threads[thread_id];
				int res = pthread_mutex_trylock(&t->pkt_queue_lock);
				if (res == EBUSY) {
					// Thread is busy, go to the next one
					continue;
				} else if (res) {
					pomlog(POMLOG_ERR "Error while locking a processing thread pkt_queue mutex : %s", pom_strerror(res));
					abort();
					return POM_ERR;
				}

				// We've got the lock, check if it's ok to queue here
				if (t->pkt_count < CORE_THREAD_PKT_QUEUE_MAX) {
					// Use this thread
					break;
				}

				// Too many packets pending in this thread, go to the next one
				pom_mutex_unlock(&t->pkt_queue_lock);
			}

			if (i < core_num_threads) {
				// We locked on a thread
				start = thread_id;
				break;
			}

			// No thread found
			if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
				// Queue full
				if (flags & CORE_QUEUE_DROP_IF_FULL) {
					// TODO add dropped stats
					debug_core("Dropped packet %p (%u.%06u) to thread %u", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts));
					return POM_OK;
				}

				// We're not going to drop this. Wait then
				debug_core("All queues full. Waiting ...");
				pom_mutex_lock(&core_pkt_queue_wait_lock);

				// Recheck the count after locking
				if (core_pkt_queue_count >= ((CORE_THREAD_PKT_QUEUE_MAX - 1) * core_num_threads)) {
					int res = pthread_cond_wait(&core_pkt_queue_wait_cond, &core_pkt_queue_wait_lock);
					if (res) {
						pomlog(POMLOG_ERR "Error while waiting for the core pkt_queue condition : %s", pom_strerror(res));
						abort();
					}
				}
				pom_mutex_unlock(&core_pkt_queue_wait_lock);

			}
		}

	}

	// We've got the thread's lock, add it to the queue

	struct core_packet_queue *tmp = NULL;
	if (t->pkt_queue_unused) {
		tmp = t->pkt_queue_unused;
		t->pkt_queue_unused = tmp->next;
	} else {
		tmp = malloc(sizeof(struct core_packet_queue));
		if (!tmp) {
			pom_mutex_unlock(&t->pkt_queue_lock);
			pom_oom(sizeof(struct core_packet_queue));
			return POM_ERR;
		}
	}

	tmp->pkt = p;
	tmp->next = NULL;
	if (t->pkt_queue_tail) {
		t->pkt_queue_tail->next = tmp;
	} else {
		t->pkt_queue_head = tmp;

		// The queue was empty, we need to signal it
		int res = pthread_cond_signal(&t->pkt_queue_cond);
		if (res) {
			pomlog(POMLOG_ERR "Error while signaling the thread pkt_queue restart condition : %s", pom_strerror(res));
			abort();
			return POM_ERR;
		}

	}
	t->pkt_queue_tail = tmp;

	t->pkt_count++;
	__sync_fetch_and_add(&core_pkt_queue_count, 1);

	registry_perf_inc(perf_pkt_queue, 1);

	debug_core("Queued packet %p (%u.%06u) to thread %u", p, pom_ptime_sec(p->ts), pom_ptime_usec(p->ts), t->thread_id);

	pom_mutex_unlock(&t->pkt_queue_lock);

	return POM_OK;
}
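
Stripped of the pom-ng specifics, the thread-selection part of core_queue_packet() is a round-robin scan with pthread_mutex_trylock(): skip workers whose lock is busy or whose queue is full, and remember where the scan stopped so the next packet starts from the following worker. The helper below is purely illustrative; the names pick_worker, locks, full and nworkers are not from the source.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Illustrative only: returns the index of a worker whose lock is now held by
 * the caller, or -1 if every worker was busy or full. */
static int pick_worker(pthread_mutex_t *locks, int (*full)(unsigned int id), unsigned int nworkers) {

	static volatile unsigned int start = 0;
	unsigned int i, id = start;

	for (i = 0; i < nworkers; i++) {
		id++;
		if (id >= nworkers)
			id -= nworkers;

		int res = pthread_mutex_trylock(&locks[id]);
		if (res == EBUSY)
			continue; // another thread holds it, try the next worker
		else if (res)
			abort(); // unexpected locking error

		if (!full(id)) {
			start = id; // next call resumes after this worker
			return id;  // lock is intentionally still held
		}

		// Queue full, release and keep looking
		pthread_mutex_unlock(&locks[id]);
	}

	return -1;
}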
Code Example #15
File: registry.c (project: k0a1a/pom-ng)
void registry_unlock() {
	pom_mutex_unlock(&registry_global_lock);
}
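
The matching registry_lock() is not included in this collection; given the function above, it presumably just takes the same global mutex. This is an assumption, shown only for symmetry.

/* Assumed counterpart of registry_unlock() above. */
void registry_lock() {
	pom_mutex_lock(&registry_global_lock);
}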
Code Example #16
File: conntrack.c (project: elfixit/pom-ng)
void conntrack_session_refcount_inc(struct conntrack_session *session) {
	pom_mutex_lock(&session->lock);
	session->refcount++;
	pom_mutex_unlock(&session->lock);
}
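
The decrement side is not shown here. A plausible counterpart would drop the count under the same per-session lock and trigger the session teardown once the last reference is gone; the cleanup call itself is omitted below because its real name and behaviour are not visible in these excerpts.

/* Sketch only: the actual pom-ng function also has to clean the session up
 * once the refcount reaches zero. */
void conntrack_session_refcount_dec(struct conntrack_session *session) {
	pom_mutex_lock(&session->lock);
	session->refcount--;
	int last = !session->refcount;
	pom_mutex_unlock(&session->lock);

	if (last) {
		// ... session cleanup would happen here ...
	}
}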
Code Example #17
static int analyzer_docsis_pkt_process(void *obj, struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct analyzer *analyzer = obj;
	struct analyzer_docsis_priv *priv = analyzer->priv;

	struct proto_process_stack *s = &stack[stack_index];

	uint8_t *type = PTYPE_UINT8_GETVAL(s->pkt_info->fields_value[proto_docsis_mgmt_field_type]);

	char *mac_dst = PTYPE_MAC_GETADDR(s->pkt_info->fields_value[proto_docsis_mgmt_field_dst]);

	// FIXME : improve this filtering at the source
	// Filter some useless messages we don't care about
	
	if (*type == MMT_UCD2 || *type == MMT_UCD3 || *type == MMT_MDD)
		return POM_OK;

	if (*type != MMT_RNG_RSP) {
		pomlog(POMLOG_DEBUG "Unhandled DOCSIS MGMT message type %u for destination mac %02hhX:%02hhX:%02hhX:%02hhX:%02hhX:%02hhX", *type, mac_dst[0], mac_dst[1], mac_dst[2], mac_dst[3], mac_dst[4], mac_dst[5]);
		return POM_OK;
	}

	// Use the last bits for the modem ID
	uint16_t id = ntohs(*(uint16_t*) (mac_dst + 4)) & ANALYZER_DOCSIS_CM_MASK;

	pom_mutex_lock(&priv->lock);

	struct analyzer_docsis_cm *cm;
	for (cm = priv->cms[id]; cm; cm = cm->next) {
		if (!memcmp(cm->mac, mac_dst, sizeof(cm->mac)))
			break;
	}

	if (!cm) {
		// Cable modem not found !
		cm = malloc(sizeof(struct analyzer_docsis_cm));
		if (!cm) {
			pom_mutex_unlock(&priv->lock);
			pom_oom(sizeof(struct analyzer_docsis_cm));
			return POM_ERR;
		}
		memset(cm, 0, sizeof(struct analyzer_docsis_cm));

		cm->t = timer_alloc(cm, analyzer_docsis_cm_timeout);
		if (!cm->t) {
			pom_mutex_unlock(&priv->lock);
			free(cm);
			return POM_ERR;
		}
	
		cm->analyzer = analyzer;
		memcpy(cm->mac, mac_dst, sizeof(cm->mac));
		cm->t4_multiplier = 1;

		cm->next = priv->cms[id];
		if (cm->next)
			cm->next->prev = cm;

		priv->cms[id] = cm;

		// Announce the new CM
		if (event_has_listener(priv->evt_cm_new)) {
			struct event *evt = event_alloc(priv->evt_cm_new);
			if (!evt) {
				pom_mutex_unlock(&priv->lock);
				return POM_ERR;
			}

			struct data *evt_data = event_get_data(evt);
			PTYPE_MAC_SETADDR(evt_data[analyzer_docsis_cm_new_mac].value, cm->mac);
			data_set(evt_data[analyzer_docsis_cm_new_mac]);
			PTYPE_STRING_SETVAL(evt_data[analyzer_docsis_cm_new_input].value, p->input->name);
			data_set(evt_data[analyzer_docsis_cm_new_input]);

			if (event_process(evt, stack, stack_index, p->ts) != POM_OK) {
				pom_mutex_unlock(&priv->lock);
				return POM_ERR;
			}
		}
	}


	switch (*type) {

		case MMT_RNG_RSP:
			analyzer_docsis_pkt_parse_rng_rsp(priv, cm, p, stack, stack_index);
			break;

		// FIXME If ranging_status is 0 and we receive another msg, probably it's actually registered
		// and we need to call analyzer_docsis_reg_status_update();

	}

	timer_queue_now(cm->t, T4_TIMEOUT * cm->t4_multiplier, p->ts);

	pom_mutex_unlock(&priv->lock);

	return POM_OK;
}
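
The bucket index for the modem table comes from the last two bytes of the destination MAC, read in network byte order and masked down to the table size. A worked example follows; ANALYZER_DOCSIS_CM_MASK is not shown in this excerpt, so a 256-entry table (mask 0xff) is assumed.

/* Assuming ANALYZER_DOCSIS_CM_MASK == 0xff:
 *
 *   mac_dst                          = 00:11:22:33:44:55
 *   last two bytes                   = 0x44 0x55
 *   ntohs(*(uint16_t *)(mac_dst + 4)) = 0x4455   (independent of host endianness)
 *   0x4455 & 0xff                    = 0x55 = 85, the index into priv->cms[]
 */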
Code Example #18
File: conntrack.c (project: elfixit/pom-ng)
int conntrack_get(struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];
	struct proto_process_stack *s_next = &stack[stack_index + 1];

	if (s->ce)
		return POM_OK;
		
	if (!s->proto || !s->proto->info->ct_info)
		return POM_ERR;

	struct ptype *fwd_value = s->pkt_info->fields_value[s->proto->info->ct_info->fwd_pkt_field_id];
	if (!fwd_value)
		return POM_ERR;

	struct ptype *rev_value = NULL;
	if (s->proto->info->ct_info->rev_pkt_field_id != CONNTRACK_PKT_FIELD_NONE) {
		rev_value = s->pkt_info->fields_value[s->proto->info->ct_info->rev_pkt_field_id];
		if (!rev_value)
			return POM_ERR;
	}

	struct conntrack_tables *ct = s->proto->ct;

	uint32_t hash = conntrack_hash(fwd_value, rev_value, s_prev->ce) % ct->table_size;

	// Lock the specific hash while browsing for a conntrack
	pom_mutex_lock(&ct->locks[hash]);

	// Try to find the conntrack in the forward table

	// Check if we can find this entry in the forward way
	if (ct->table[hash]) {
		s->ce = conntrack_find(ct->table[hash], fwd_value, rev_value, s_prev->ce);
		if (s->ce) {
			s->direction = POM_DIR_FWD;
			s_next->direction = POM_DIR_FWD;
			pom_mutex_lock(&s->ce->lock);
			s->ce->refcount++;
			pom_mutex_unlock(&ct->locks[hash]);
			return POM_OK;
		}
	}


	// It wasn't found in the forward way, maybe in the reverse direction ?
	if (rev_value) {
		s->ce = conntrack_find(ct->table[hash], rev_value, fwd_value, s_prev->ce);
		if (s->ce) {
			s->direction = POM_DIR_REV;
			s_next->direction = POM_DIR_REV;
			pom_mutex_lock(&s->ce->lock);
			s->ce->refcount++;
			pom_mutex_unlock(&ct->locks[hash]);
			return POM_OK;
		}

	}

	// It's not found in the reverse direction either, let's create it then

	if (s_prev->direction == POM_DIR_REV && rev_value) {
		// This indicates that the parent conntrack matched in a reverse direction
		// Let's keep directions consistent and swap fwd and rev values
		struct ptype *tmp = rev_value;
		rev_value = fwd_value;
		fwd_value = tmp;
	}


	// Alloc the conntrack entry
	struct conntrack_entry *ce = malloc(sizeof(struct conntrack_entry));
	if (!ce) {
		pom_mutex_unlock(&ct->locks[hash]);
		pom_oom(sizeof(struct conntrack_entry));
		return POM_ERR;
	}
	memset(ce, 0, sizeof(struct conntrack_entry));

	if (pom_mutex_init_type(&ce->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK) {
		pom_mutex_unlock(&ct->locks[hash]);
		free(ce);
		return POM_ERR;
	}

	struct conntrack_node_list *child = NULL;

	// We shouldn't have to check if the parent still exists as it
	// is supposed to have a refcount since conntrack_get is called after
	// the parent's conntrack_get was called and before conntrack_refcount_dec
	// was called by core_process_stack.
	if (s_prev->ce) {

		child = malloc(sizeof(struct conntrack_node_list));
		if (!child) {
			pthread_mutex_destroy(&ce->lock);
			pom_mutex_unlock(&ct->locks[hash]);
			free(ce);
			pom_oom(sizeof(struct conntrack_node_list));
			return POM_ERR;
		}
		memset(child, 0, sizeof(struct conntrack_node_list));

		child->ce = ce;
		child->ct = s->proto->ct;
		child->hash = hash;

		ce->parent = malloc(sizeof(struct conntrack_node_list));
		if (!ce->parent) {
			pthread_mutex_destroy(&ce->lock);
			pom_mutex_unlock(&ct->locks[hash]);
			free(child);
			free(ce);
			pom_oom(sizeof(struct conntrack_node_list));
			return POM_ERR;
		}
		ce->parent->ce = s_prev->ce;
		ce->parent->ct = s_prev->ce->proto->ct;
		ce->parent->hash = s_prev->ce->hash;

	}

	ce->proto = s->proto;

	ce->hash = hash;

	struct conntrack_list *lst = NULL;

	ce->fwd_value = ptype_alloc_from(fwd_value);
	if (!ce->fwd_value)
		goto err;

	if (rev_value) {
		ce->rev_value = ptype_alloc_from(rev_value);
		if (!ce->rev_value)
			goto err;
	}
	// Alloc the list node
	lst = malloc(sizeof(struct conntrack_list));
	if (!lst) {
		pom_oom(sizeof(struct conntrack_list));
		goto err;
	}
	memset(lst, 0, sizeof(struct conntrack_list));
	lst->ce = ce;

	// Insert in the conntrack table
	lst->next = ct->table[hash];
	if (lst->next) {
		lst->next->prev = lst;
		registry_perf_inc(s->proto->perf_conn_hash_col, 1);
	}
	ct->table[hash] = lst;

	// Add the child to the parent if any
	if (child) {
		pom_mutex_lock(&s_prev->ce->lock);
		if (!s_prev->ce->refcount)
			pomlog(POMLOG_WARN "Internal error, the parent is supposed to have a refcount > 0");
		child->next = s_prev->ce->children;
		if (child->next)
			child->next->prev = child;
		s_prev->ce->children = child;
		pom_mutex_unlock(&s_prev->ce->lock);
	}

	// Unlock the table
	if (s_prev->ce) {
		debug_conntrack("Allocated conntrack %p with parent %p", ce, s_prev->ce);
	} else {
		debug_conntrack("Allocated conntrack %p with no parent", ce);
	}
	pom_mutex_lock(&ce->lock);
	ce->refcount++;
	pom_mutex_unlock(&ct->locks[hash]);

	s->ce = ce;
	s->direction = s_prev->direction;

	// Propagate the direction to the payload as well
	s_next->direction = s->direction;
	
	registry_perf_inc(ce->proto->perf_conn_cur, 1);
	registry_perf_inc(ce->proto->perf_conn_tot, 1);

	return POM_OK;

err:
	pom_mutex_unlock(&ct->locks[hash]);

	pthread_mutex_destroy(&ce->lock);
	if (child)
		free(child);

	if (lst)
		free(lst);

	if (ce->parent)
		free(ce->parent);
	
	if (ce->fwd_value)
		ptype_cleanup(ce->fwd_value);

	if (ce->rev_value)
		ptype_cleanup(ce->rev_value);

	free(ce);

	return POM_ERR;
}
Code Example #19
File: analyzer_arp.c (project: dkarametos/pom-ng)
static int analyzer_arp_pkt_process(void *obj, struct packet *p, struct proto_process_stack *stack, unsigned int stack_index) {

	struct analyzer *analyzer = obj;
	struct analyzer_arp_priv *priv = analyzer->priv;

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];

	struct in_addr arp_ip = PTYPE_IPV4_GETADDR(s->pkt_info->fields_value[proto_arp_field_sender_proto_addr]);

	// Discard bogon 0.0.0.0
	if (!arp_ip.s_addr)
		return POM_OK;

	// Find that IP in the table
	uint32_t id = arp_ip.s_addr & ANALYZER_ARP_HOST_MASK;
	char *arp_mac = PTYPE_MAC_GETADDR(s->pkt_info->fields_value[proto_arp_field_sender_hw_addr]);

	uint16_t vlan = 0;
	if (s_prev->proto == priv->proto_vlan)
		vlan = *PTYPE_UINT16_GETVAL(s_prev->pkt_info->fields_value[proto_vlan_field_vid]);

	pom_mutex_lock(&priv->lock);

	struct analyzer_arp_host *host;
	for (host = priv->hosts[id]; host; host = host->next) {
		if (host->ip.s_addr == arp_ip.s_addr && host->vlan == vlan)
			break;
	}

	if (!host) {
		// Host not found !
		host = malloc(sizeof(struct analyzer_arp_host));
		if (!host) {
			pom_mutex_unlock(&priv->lock);
			pom_oom(sizeof(struct analyzer_arp_host));
			return POM_ERR;
		}
		memset(host, 0, sizeof(struct analyzer_arp_host));

		host->ip.s_addr = arp_ip.s_addr;
		memcpy(host->mac, arp_mac, sizeof(host->mac));
		host->vlan = vlan;

		host->next = priv->hosts[id];
		if (host->next)
			host->next->prev = host;

		priv->hosts[id] = host;
		pom_mutex_unlock(&priv->lock);

		// Announce the new station
	
		if (event_has_listener(priv->evt_new_sta)) {
			struct event *evt = event_alloc(priv->evt_new_sta);
			if (!evt)
				return POM_ERR;

			struct data *evt_data = evt->data;
			ptype_copy(evt_data[analyzer_arp_new_sta_mac_addr].value, s->pkt_info->fields_value[proto_arp_field_sender_hw_addr]);
			data_set(evt_data[analyzer_arp_new_sta_mac_addr]);
			ptype_copy(evt_data[analyzer_arp_new_sta_ip_addr].value, s->pkt_info->fields_value[proto_arp_field_sender_proto_addr]);
			data_set(evt_data[analyzer_arp_new_sta_ip_addr]);
			PTYPE_UINT16_SETVAL(evt_data[analyzer_arp_new_sta_vlan].value, vlan);
			data_set(evt_data[analyzer_arp_new_sta_vlan]);
			PTYPE_STRING_SETVAL(evt_data[analyzer_arp_new_sta_input].value, p->input->name);
			data_set(evt_data[analyzer_arp_new_sta_input]);
			if (event_process(evt, stack, stack_index) != POM_OK)
				return POM_ERR;
		}
		
		// Nothing else to do
		return POM_OK;
	}

	// Host was found, check mac
	if (memcmp(host->mac, arp_mac, sizeof(host->mac))) {
		if (event_has_listener(priv->evt_sta_changed)) {
			struct event *evt = event_alloc(priv->evt_sta_changed);
			if (!evt) {
				pom_mutex_unlock(&priv->lock);
				return POM_ERR;
			}

			struct data *evt_data = evt->data;
			PTYPE_MAC_SETADDR(evt_data[analyzer_arp_sta_changed_old_mac_addr].value, host->mac);
			data_set(evt_data[analyzer_arp_sta_changed_old_mac_addr]);
			ptype_copy(evt_data[analyzer_arp_sta_changed_new_mac_addr].value, s->pkt_info->fields_value[proto_arp_field_sender_hw_addr]);
			data_set(evt_data[analyzer_arp_sta_changed_new_mac_addr]);
			ptype_copy(evt_data[analyzer_arp_sta_changed_ip_addr].value, s->pkt_info->fields_value[proto_arp_field_sender_proto_addr]);
			data_set(evt_data[analyzer_arp_sta_changed_ip_addr]);
			PTYPE_UINT16_SETVAL(evt_data[analyzer_arp_sta_changed_vlan].value, vlan);
			data_set(evt_data[analyzer_arp_sta_changed_vlan]);
			PTYPE_STRING_SETVAL(evt_data[analyzer_arp_sta_changed_input].value, p->input->name);
			data_set(evt_data[analyzer_arp_sta_changed_input]);

			if (event_process(evt, stack, stack_index) != POM_OK) {
				pom_mutex_unlock(&priv->lock);
				return POM_ERR;
			}
		}
		memcpy(host->mac, arp_mac, sizeof(host->mac));
	}
	


	pom_mutex_unlock(&priv->lock);
	return POM_OK;
}
Code Example #20
File: core.c (project: gmsoft-tuxicoman/pom-ng)
int core_set_state(enum core_state state) {

	int res = POM_OK;

	pom_mutex_lock(&core_state_lock);

	if (core_cur_state == state) {
		pomlog(POMLOG_DEBUG "Core state unchanged : %u", state);
		pom_mutex_unlock(&core_state_lock);
		return POM_OK;
	}

	if (core_cur_state == core_state_idle && state == core_state_finishing) {
		pom_mutex_unlock(&core_state_lock);
		return POM_OK;
	}

	core_cur_state = state;
	pomlog(POMLOG_DEBUG "Core state changed to %u", state);
	if (pthread_cond_broadcast(&core_state_cond)) {
		pomlog(POMLOG_ERR "Unable to signal core state condition : %s", pom_strerror(errno));
		pom_mutex_unlock(&core_state_lock);
		return POM_ERR;
	}
	pom_mutex_unlock(&core_state_lock);

	if (state == core_state_idle) {

		res = core_processing_stop();

		ptime now = pom_gettimeofday();

		int i;
		for (i = 0; i < CORE_PROCESS_THREAD_MAX; i++)
			core_clock[i] = 0;

		if (core_start_time) {
			ptime runtime = now - core_start_time;

			pomlog(POMLOG_INFO "Core was running for %u.%06u secs", pom_ptime_sec(runtime), pom_ptime_usec(runtime));
		}

	} else if (state == core_state_running) {
		core_start_time = pom_gettimeofday();
		res = core_processing_start();
	} else if (state == core_state_finishing) {
		// Signal all the threads
		unsigned int i;
		for (i = 0; i < core_num_threads; i++) {
			struct core_processing_thread *t = core_processing_threads[i];

			pom_mutex_lock(&t->pkt_queue_lock);
			int res = pthread_cond_broadcast(&t->pkt_queue_cond);
			pom_mutex_unlock(&t->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while broadcasting restart condition after set state");
				abort();
			}
		}
	}
	return res;
}
Code Example #21
File: conntrack.c (project: elfixit/pom-ng)
void conntrack_unlock(struct conntrack_entry *ce) {
	pom_mutex_unlock(&ce->lock);
}
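
conntrack_lock() is used throughout these examples but never shown; by symmetry with conntrack_unlock() above it is presumably just the following (an assumption).

/* Assumed counterpart of conntrack_unlock() above. */
void conntrack_lock(struct conntrack_entry *ce) {
	pom_mutex_lock(&ce->lock);
}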
Code Example #22
File: core.c (project: k0a1a/pom-ng)
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {
		
		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL;
			}
			registry_perf_inc(perf_thread_active, 1);
		}


		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;


		// Add it to the unused list
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;

		registry_perf_dec(perf_pkt_queue, 1);

		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {

			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure we keep it monotonous
			core_clock[tpriv->thread_id] = pkt->ts;

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}
		
		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));
		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);

	}

	halt("Processing thread encountered an error", 1);
end:
	packet_info_pool_cleanup();

	return NULL;
}
Code Example #23
File: mod.c (project: Astalaseven/pom-ng)
struct mod_reg *mod_load(char *name) {

	pom_mutex_lock(&mod_reg_lock);
	struct mod_reg *tmp;
	for (tmp = mod_reg_head; tmp && strcmp(tmp->name, name); tmp = tmp->next);
	if (tmp) {
		pom_mutex_unlock(&mod_reg_lock);
		pomlog(POMLOG_WARN "Module %s is already registered", name);
		return NULL;
	}
	pom_mutex_unlock(&mod_reg_lock);

	char filename[FILENAME_MAX];
	memset(filename, 0, FILENAME_MAX);

	char *env_libdir = getenv(MOD_LIBDIR_ENV_VAR);

	if (env_libdir)
		strcpy(filename, env_libdir);
	else
		strcpy(filename, POM_LIBDIR);

	if (filename[strlen(filename) - 1] != '/')
		strcat(filename, "/");

	strcat(filename, name);
	strcat(filename, POM_LIB_EXT);

	void *dl_handle = dlopen(filename, RTLD_FLAGS);

	if (!dl_handle) {
		pomlog(POMLOG_ERR "Unable to load module %s : %s", name, dlerror());
		return NULL;
	}

	dlerror();

	char func_name[FILENAME_MAX];
	strcpy(func_name, name);
	strcat(func_name, "_reg_info");

	struct mod_reg_info* (*mod_reg_func) () = NULL;
	mod_reg_func = dlsym(dl_handle, func_name);
	if (!mod_reg_func) {
		dlclose(dl_handle);
		pomlog(POMLOG_ERR "Function %s not found in module %s", func_name, filename);
		return NULL;
	}

	struct mod_reg_info *reg_info = mod_reg_func();
	if (!reg_info) {
		dlclose(dl_handle);
		pomlog(POMLOG_ERR "Function %s returned NULL", func_name);
		return NULL;
	}

	if (reg_info->api_ver != MOD_API_VER) {
		dlclose(dl_handle);
		pomlog(POMLOG_ERR "API version of module %s does not match : expected %u got %u", name, MOD_API_VER, reg_info->api_ver);
		return NULL;
	}

	if (mod_load_dependencies(reg_info->dependencies) != POM_OK) {
		dlclose(dl_handle);
		return NULL;
	}

	struct mod_reg *reg = malloc(sizeof(struct mod_reg));
	if (!reg) {
		dlclose(dl_handle);
		pomlog(POMLOG_ERR "Not enough memory to allocate struct mod_reg");
		return NULL;
	}

	memset(reg, 0, sizeof(struct mod_reg));

	reg->priv = dl_handle;
	reg->filename = strdup(filename);
	reg->name = strdup(name);
	reg->info = reg_info;

	if (!reg->filename || !reg->name || pthread_mutex_init(&reg->lock, NULL)) {
		if (reg->filename)
			free(reg->filename);
		if (reg->name)
			free(reg->name);
		free(reg);
		dlclose(dl_handle);

		pomlog(POMLOG_ERR "Not enough memory to allocate name and filename of struct mod_reg or failed to initialize the lock");

		return NULL;
	}

	pom_mutex_lock(&mod_reg_lock);

	reg->next = mod_reg_head;
	mod_reg_head = reg;
	if (reg->next)
		reg->next->prev = reg;

	pom_mutex_unlock(&mod_reg_lock);

	pomlog(POMLOG_DEBUG "Module %s loaded, registering components ...", reg->name);

	if (reg->info->register_func(reg) != POM_OK) {
		pomlog(POMLOG_WARN "Error while registering the components of module %s", reg->name);
		mod_unload(reg);
		return NULL;
	}

	pom_mutex_lock(&reg->lock);
	if (!reg->refcount) {
		pom_mutex_unlock(&reg->lock);
		pomlog(POMLOG_DEBUG "Module %s did not register anything. Unloading it", reg->name);
		mod_unload(reg);
		return NULL;
	}

	pom_mutex_unlock(&reg->lock);

	return reg;

}
Code Example #24
File: conntrack.c (project: elfixit/pom-ng)
int conntrack_get_unique_from_parent(struct proto_process_stack *stack, unsigned int stack_index) {

	struct conntrack_node_list *child = NULL;
	struct conntrack_list *lst = NULL;

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];

	struct conntrack_entry *parent = s_prev->ce;

	if (!s->proto) {
		pomlog(POMLOG_ERR "Cannot allocate conntrack for NULL proto");
		return POM_ERR;
	}

	if (!parent) {
		pomlog(POMLOG_ERR "Cannot allocate unique conntrack without a parent");
		return POM_ERR;
	}


	if (s->ce) { // This should only occur in the case that an expectation matched
		// Make sure the conntrack is locked
		int res = pthread_mutex_trylock(&s->ce->lock);
		if (res && res != EBUSY && res != EDEADLK) {
			pomlog(POMLOG_ERR "Error while locking the conntrack : %s", pom_strerror(res));
			return POM_ERR;
		}
		return POM_OK;
	}

	conntrack_lock(parent);

	struct conntrack_tables *ct = s->proto->ct;
	struct conntrack_entry *res = NULL;

	// Look for the conntrack
	
	if (parent->children) {
		struct conntrack_node_list *child = parent->children;
		for (child = parent->children; child && child->ce->proto != s->proto; child = child->next);
		if (child)
			res = child->ce;
	} 

	if (!res) {

		// Alloc the conntrack
		res = malloc(sizeof(struct conntrack_entry));
		if (!res) {
			pom_oom(sizeof(struct conntrack_entry));
			goto err;
		}

		memset(res, 0, sizeof(struct conntrack_entry));
		res->proto = s->proto;

		if (pom_mutex_init_type(&res->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK)
			goto err;

		// Alloc the child list
		child = malloc(sizeof(struct conntrack_node_list));
		if (!child) 
			goto err;
		
		memset(child, 0, sizeof(struct conntrack_node_list));
		child->ce = res;
		child->ct = ct;

		// Alloc the parent node
		res->parent = malloc(sizeof(struct conntrack_node_list));
		if (!res->parent) {
			free(child);
			goto err;
		}
		memset(res->parent, 0, sizeof(struct conntrack_node_list));
		res->parent->ce = parent;
		res->parent->ct = parent->proto->ct;
		res->parent->hash = parent->hash;

		// Alloc the list node
		lst = malloc(sizeof(struct conntrack_list));
		if (!lst) {
			pom_oom(sizeof(struct conntrack_list));
			goto err;
		}
		memset(lst, 0, sizeof(struct conntrack_list));
		lst->ce = res;

		// Add the child to the parent
		child->next = parent->children;
		if (child->next)
			child->next->prev = child;
		parent->children = child;

		// Add the conntrack to the table
		pom_mutex_lock(&ct->locks[0]);
		lst->next = ct->table[0];
		if (lst->next)
			lst->next->prev = lst;
		ct->table[0] = lst;
		pom_mutex_unlock(&ct->locks[0]);
		debug_conntrack("Allocated conntrack %p with parent %p (uniq child)", res, parent);

		registry_perf_inc(s->proto->perf_conn_cur, 1);
		registry_perf_inc(s->proto->perf_conn_tot, 1);

	}

	conntrack_unlock(parent);

	conntrack_lock(res);
	res->refcount++;
	s->ce = res;
	s->direction = s_prev->direction;

	struct proto_process_stack *s_next = &stack[stack_index + 1];
	s_next->direction = s->direction;

	return POM_OK;

err:
	if (res) {
		pthread_mutex_destroy(&res->lock);
		free(res);
	}

	if (child)
		free(child);
	conntrack_unlock(parent);

	return POM_ERR;
}
Code Example #25
File: conntrack.c (project: elfixit/pom-ng)
int conntrack_get_unique(struct proto_process_stack *stack, unsigned int stack_index) {

	struct proto_process_stack *s = &stack[stack_index];
	struct proto_process_stack *s_prev = &stack[stack_index - 1];
	if (s_prev->ce) {
		pomlog(POMLOG_ERR "conntrack_get_unique() can only be used for link protocols");
		return POM_ERR;
	}

	if (s->ce) { // This should only occur in the case that an expectation matched
		// Make sure the conntrack is locked
		int res = pthread_mutex_trylock(&s->ce->lock);
		if (res && res != EBUSY && res != EDEADLK) {
			pomlog(POMLOG_ERR "Error while locking the conntrack : %s", pom_strerror(res));
			return POM_ERR;
		}
		return POM_OK;
	}

	struct conntrack_tables *ct = s->proto->ct;
	pom_mutex_lock(&ct->locks[0]);

	struct conntrack_list *lst = ct->table[0];

	for (lst = ct->table[0]; lst && lst->ce->parent; lst = lst->next);

	if (lst) {
		// Conntrack found
		s->ce = lst->ce;
		pom_mutex_unlock(&ct->locks[0]);
	} else {
		// Alloc the conntrack
		struct conntrack_entry *res = NULL;
		res = malloc(sizeof(struct conntrack_entry));
		if (!res) {
			pom_oom(sizeof(struct conntrack_entry));
			pom_mutex_unlock(&ct->locks[0]);
			return POM_ERR;
		}

		memset(res, 0, sizeof(struct conntrack_entry));
		res->proto = s->proto;

		if (pom_mutex_init_type(&res->lock, PTHREAD_MUTEX_ERRORCHECK) != POM_OK) {
			pom_mutex_unlock(&ct->locks[0]);
			return POM_ERR;
		}

		// Alloc the list node
		lst = malloc(sizeof(struct conntrack_list));
		if (!lst) {
			pom_oom(sizeof(struct conntrack_list));
			pom_mutex_unlock(&ct->locks[0]);
			return POM_ERR;
		}
		memset(lst, 0, sizeof(struct conntrack_list));
		lst->ce = res;

		// Add the conntrack to the table
		lst->next = ct->table[0];
		if (lst->next)
			lst->next->prev = lst;
		ct->table[0] = lst;
		pom_mutex_unlock(&ct->locks[0]);
		debug_conntrack("Allocated unique conntrack %p", res);

		registry_perf_inc(s->proto->perf_conn_cur, 1);
		registry_perf_inc(s->proto->perf_conn_tot, 1);
		s->ce = res;
	}

	conntrack_lock(s->ce);
	s->ce->refcount++;

	struct proto_process_stack *s_next = &stack[stack_index + 1];
	s_next->direction = s->direction;

	return POM_OK;

}
Code Example #26
File: packet.c (project: dkarametos/pom-ng)
int packet_buffer_pool_get(struct packet *pkt, size_t size, size_t align_offset) {

	if (align_offset >= PACKET_BUFFER_ALIGNMENT) {
		pomlog(POMLOG_ERR "Alignment offset too big");
		return POM_ERR;
	}

	size_t tot_size = size + align_offset + PACKET_BUFFER_ALIGNMENT;

	if (tot_size > packet_buffer_pool_size[PACKET_BUFFER_POOL_COUNT - 1]) {
		pomlog(POMLOG_ERR "Requested size too big : %llu", size);
		return POM_ERR;
	}

	int i;
	for (i = 0; i < PACKET_BUFFER_POOL_COUNT && packet_buffer_pool_size[i] < tot_size; i++);

	struct packet_buffer *pb = NULL;

	pom_mutex_lock(&packet_buffer_pool_mutex);

	if (!packet_buffer_pool[i].unused) {

		// Allocate a new one
		size_t alloc_size = packet_buffer_pool_size[i] + sizeof(struct packet_buffer);

		pb = malloc(alloc_size);
		if (!pb) {
			pom_mutex_unlock(&packet_buffer_pool_mutex);
			pom_oom(alloc_size);
			return POM_ERR;
		}
		memset(pb, 0, alloc_size);

		pb->base_buff = (void*)pb + sizeof(struct packet_buffer);
		pb->aligned_buff = (void*) (((long)pb->base_buff & ~(PACKET_BUFFER_ALIGNMENT - 1)) + PACKET_BUFFER_ALIGNMENT + align_offset);
		pb->pool_id = i;

	} else {
		// Reuse an unused one
		pb = packet_buffer_pool[i].unused;
		if (pb->next)
			pb->next->prev = NULL;

		packet_buffer_pool[i].unused = pb->next;


	}

	// Put this one in the used list

	pb->next = packet_buffer_pool[i].used;
	if (pb->next)
		pb->next->prev = pb;
	packet_buffer_pool[i].used = pb;

	pom_mutex_unlock(&packet_buffer_pool_mutex);

	pkt->pkt_buff = pb;
	pkt->len = size;
	pkt->buff = pb->aligned_buff;

	return POM_OK;
}
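
The aligned_buff computation above rounds the base pointer down to an alignment boundary, then jumps one full boundary ahead and adds the caller's offset, so the payload always starts at a boundary plus align_offset and the PACKET_BUFFER_ALIGNMENT + align_offset bytes added to tot_size are always sufficient. A worked example, assuming PACKET_BUFFER_ALIGNMENT == 16 (the real value is not shown here):

/* Assuming PACKET_BUFFER_ALIGNMENT == 16 and align_offset == 2:
 *
 *   base_buff           = 0x100b
 *   0x100b & ~(16 - 1)  = 0x1000   (round down to the previous boundary)
 *   0x1000 + 16 + 2     = 0x1012   (next boundary, plus the offset)
 *
 * The aligned buffer starts 7 bytes after base_buff; in the worst case
 * (base_buff already on a boundary) the shift is 16 + 2 bytes, which is
 * exactly what tot_size reserves on top of the requested size.
 */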
Code Example #27
File: timer.c (project: Astalaseven/pom-ng)
int timer_queue_now(struct timer *t, unsigned int expiry, ptime now) {


	pom_mutex_lock(&timer_main_lock);

	// Timer is still queued, dequeue it
	if (t->queue) {
		if (t->prev) {
			t->prev->next = t->next;
		} else {
			t->queue->head = t->next;
			if (t->queue->head)
				t->queue->head->prev = NULL;
		}

		if (t->next) {
			t->next->prev = t->prev;
		} else {
			t->queue->tail = t->prev;
			if (t->queue->tail)
				t->queue->tail->next = NULL;
			
		}
		t->queue = NULL;
		t->prev = NULL;
		t->next = NULL;
	} else {
		registry_perf_inc(perf_timer_queued, 1);
	}

	struct timer_queue *tq = timer_queues;

	// First find the right queue or create it
	
	if (!tq) {

		// There is no queue yet
		tq = malloc(sizeof(struct timer_queue));
		if (!tq) {
			pom_mutex_unlock(&timer_main_lock);
			pom_oom(sizeof(struct timer_queue));
			return POM_ERR;
		}
		memset(tq, 0, sizeof(struct timer_queue));
		timer_queues = tq;

		tq->expiry = expiry;

		registry_perf_inc(perf_timer_queues, 1);

	} else {

		while (tq) {
			
			if (tq->expiry == expiry) { // The right queue already exists
				
				break;

			} else if (tq->expiry > expiry) { // The right queue doesn't exist and we are too far in the list

				struct timer_queue *tmp;
				tmp = malloc(sizeof(struct timer_queue));
				if (!tmp) {
					pom_oom(sizeof(struct timer_queue));
					pom_mutex_unlock(&timer_main_lock);
					return POM_ERR;
				}
				memset(tmp, 0, sizeof(struct timer_queue));

				tmp->prev = tq->prev;
				tmp->next = tq;
				tq->prev = tmp;

				if (tmp->prev)
					tmp->prev->next = tmp;
				else
					timer_queues = tmp;


				tq = tmp;
				tq->expiry = expiry;

				registry_perf_inc(perf_timer_queues, 1);

				break;
			
			} else if (!tq->next) { // Looks like we are at the end of our list

				struct timer_queue *tmp;
				tmp = malloc(sizeof(struct timer_queue));
				if (!tmp) {
					pom_oom(sizeof(struct timer_queue));
					pom_mutex_unlock(&timer_main_lock);
					return POM_ERR;
				}
				memset(tmp, 0, sizeof(struct timer_queue));

				tmp->prev = tq;
				
				tq->next = tmp;

				tq = tmp;

				tq->expiry = expiry;

				registry_perf_inc(perf_timer_queues, 1);
				
				break;
			}

			tq = tq->next;
		}

	}

	// Now we can queue the timer
	
	if (tq->head == NULL) {
		tq->head = t;
		tq->tail = t;
	} else {
		t->prev = tq->tail;
		tq->tail = t;
		t->prev->next = t;
	}

	// Update the expiry time
	
	t->expires = now + (expiry * 1000000UL);
	t->queue = tq;
	pom_mutex_unlock(&timer_main_lock);


	return POM_OK;
}
Code Example #28
File: packet.c (project: dkarametos/pom-ng)
int packet_stream_process_packet(struct packet_stream *stream, struct packet *pkt, struct proto_process_stack *stack, unsigned int stack_index, uint32_t seq, uint32_t ack) {

	if (!stream || !pkt || !stack)
		return PROTO_ERR;

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : start", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);

	struct proto_process_stack *cur_stack = &stack[stack_index];
	int direction = cur_stack->direction;

	int must_wait = 0;

	pom_mutex_lock(&stream->wait_lock);

	int res = pthread_mutex_trylock(&stream->lock);
	if (res == EBUSY) {
		// Already locked, let's wait a bit
		must_wait = 1;
	} else if (res) {
		pomlog(POMLOG_ERR "Error while locking packet stream lock : %s", pom_strerror(errno));
		abort();
		return POM_ERR;
	} else {

		// We got the processing lock. But was it really this thread's turn ?

		struct packet_stream_thread_wait *tmp = stream->wait_list_head;
		// A thread with a packet preceding ours is waiting
		if (tmp && (tmp->ts.tv_sec < pkt->ts.tv_sec || (tmp->ts.tv_sec == pkt->ts.tv_sec && tmp->ts.tv_usec < pkt->ts.tv_usec))) {
			// No it wasn't, release it and signal the right thread
			must_wait = 2;
			pom_mutex_unlock(&stream->lock);
			debug_stream("thread %p, entry %p : signaling thread %p", pthread_self(), stream, stream->wait_list_head->thread);
			pthread_cond_broadcast(&stream->wait_list_head->cond);
		} else {
			// Yes it was. YAY !
			pom_mutex_unlock(&stream->wait_lock);
		}

	}


	if (must_wait) {

		// Add ourself in the waiting list
		struct packet_stream_thread_wait *lst = NULL;
		if (stream->wait_list_unused) {
			lst = stream->wait_list_unused;
			stream->wait_list_unused = lst->next;
			lst->next = NULL;
		} else {
			lst = malloc(sizeof(struct packet_stream_thread_wait));
			if (!lst) {
				pom_oom(sizeof(struct packet_stream_thread_wait));
				pom_mutex_unlock(&stream->wait_lock);
				return POM_ERR;
			}
			memset(lst, 0, sizeof(struct packet_stream_thread_wait));
			
			if (pthread_cond_init(&lst->cond, NULL)) {
				pomlog(POMLOG_ERR "Error while initializing wait list condition : %s", pom_strerror(errno));
				free(lst);
				return POM_ERR;
			}
		}
		memcpy(&lst->ts, &pkt->ts, sizeof(struct timeval));
		lst->thread = pthread_self();

		struct packet_stream_thread_wait *tmp;
		for (tmp = stream->wait_list_head; tmp && (tmp->ts.tv_sec < lst->ts.tv_sec || (tmp->ts.tv_sec == lst->ts.tv_sec && tmp->ts.tv_usec < lst->ts.tv_usec)); tmp = tmp->next);
		if (tmp) {

			lst->prev = tmp->prev;
			if (lst->prev)
				lst->prev->next = lst;
			else
				stream->wait_list_head = lst;

			lst->next = tmp;
			lst->next->prev = lst;
		} else {
			lst->prev = stream->wait_list_tail;
			if (lst->prev)
				lst->prev->next = lst;
			else
				stream->wait_list_head = lst;

			stream->wait_list_tail = lst;
		}


		while (1) {
			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : waiting", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
			if (pthread_cond_wait(&lst->cond, &stream->wait_lock)) {
				pomlog(POMLOG_ERR "Error while waiting for the packet stream wait cond : %s", pom_strerror(errno));
				abort();
				return POM_ERR;
			}

			if (stream->wait_list_head != lst) {
				// There is a small chance that another thread locks stream->wait_lock while pthread_cond_wait reacquires it
				// If we are not the right thread, then simply signal the right one and wait again for our turn
				debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : wrong thread woke up", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
				pthread_cond_broadcast(&stream->wait_list_head->cond);
				continue;
			}
			break;
		}

		tmp = stream->wait_list_head;
		stream->wait_list_head = tmp->next;
		if (stream->wait_list_head)
			stream->wait_list_head->prev = NULL;
		else
			stream->wait_list_tail = NULL;

		tmp->next = stream->wait_list_unused;
		tmp->prev = NULL;
		stream->wait_list_unused = tmp;

		pom_mutex_unlock(&stream->wait_lock);
		pom_mutex_lock(&stream->lock);

	}

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : start locked", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);

	// Update the stream flags
	if (stream->flags & PACKET_FLAG_STREAM_BIDIR) {

		// Update flags
		if (direction == POM_DIR_FWD && !(stream->flags & PACKET_FLAG_STREAM_GOT_FWD_DIR)) {
			stream->flags |= PACKET_FLAG_STREAM_GOT_FWD_DIR;
		} else if (direction == POM_DIR_REV && !(stream->flags & PACKET_FLAG_STREAM_GOT_REV_DIR)) {
			stream->flags |= PACKET_FLAG_STREAM_GOT_REV_DIR;
		}

	}

	// Put this packet in our struct packet_stream_pkt
	struct packet_stream_pkt spkt = {0};
	spkt.pkt = pkt;
	spkt.seq = seq;
	spkt.ack = ack;
	spkt.plen = cur_stack->plen;
	spkt.stack = stack;
	spkt.stack_index = stack_index;


	// Check if the packet is worth processing
	uint32_t cur_seq = stream->cur_seq[direction];
	if (cur_seq != seq) {
		if (packet_stream_is_packet_old_dupe(stream, &spkt, direction)) {
			// cur_seq is after the end of the packet, discard it
			packet_stream_end_process_packet(stream);
			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : discard", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
			return PROTO_OK;
		}

		if (packet_stream_remove_dupe_bytes(stream, &spkt, direction) == POM_ERR) {
			packet_stream_end_process_packet(stream);
			return PROTO_ERR;
		}
	}


	// Ok let's process it then

	// Check if it is the packet we're waiting for
	if (packet_stream_is_packet_next(stream, &spkt, direction)) {

		// Process it
		stream->cur_seq[direction] += cur_stack->plen;
		stream->cur_ack[direction] = ack;
		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : process", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);

		int res = stream->handler(stream->ce, pkt, stack, stack_index);
		if (res == PROTO_ERR) {
			packet_stream_end_process_packet(stream);
			return PROTO_ERR;
		}

		// Check if additional packets can be processed
		struct packet_stream_pkt *p = NULL;
		unsigned int cur_dir = direction, additional_processed = 0;
		while ((p = packet_stream_get_next(stream, &cur_dir))) {


			debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : process additional", pthread_self(), stream, p->pkt->ts.tv_sec, p->pkt->ts.tv_usec, p->seq, p->ack);

			if (stream->handler(stream->ce, p->pkt, p->stack, p->stack_index) == POM_ERR) {
				packet_stream_end_process_packet(stream);
				return PROTO_ERR;
			}

			stream->cur_seq[cur_dir] += p->plen;
			stream->cur_ack[cur_dir] = p->ack;
	
			packet_stream_free_packet(p);

			additional_processed = 1;
		}

		if (additional_processed) {
			if (!stream->head[POM_DIR_FWD] && !stream->head[POM_DIR_REV])
				conntrack_timer_dequeue(stream->t);
			else
				conntrack_timer_queue(stream->t, stream->same_dir_timeout);
		}

		packet_stream_end_process_packet(stream);
		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : done processed", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
		return res;
	}

	// Queue the packet then

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : queue", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);

	struct packet_stream_pkt *p = malloc(sizeof(struct packet_stream_pkt));
	if (!p) {
		pom_oom(sizeof(struct packet_stream_pkt));
		packet_stream_end_process_packet(stream);
		return PROTO_ERR;
	}
	memset(p, 0 , sizeof(struct packet_stream_pkt));


	if (cur_stack->plen) {
		// No need to backup this if there is no payload
		p->pkt = packet_clone(pkt, stream->flags);
		if (!p->pkt) {
			packet_stream_end_process_packet(stream);
			free(p);
			return PROTO_ERR;
		}
		p->stack = core_stack_backup(stack, pkt, p->pkt);
		if (!p->stack) {
			packet_stream_end_process_packet(stream);
			packet_pool_release(p->pkt);
			free(p);
			return PROTO_ERR;
		}
	}


	p->plen = cur_stack->plen;
	p->seq = seq;
	p->ack = ack;
	p->stack_index = stack_index;


	if (!stream->tail[direction]) {
		stream->head[direction] = p;
		stream->tail[direction] = p;
	} else { 

		struct packet_stream_pkt *tmp = stream->tail[direction];
		while ( tmp && 
			((tmp->seq >= seq && tmp->seq - seq < PACKET_HALF_SEQ)
			|| (tmp->seq <= seq && seq - tmp->seq > PACKET_HALF_SEQ))) {

			tmp = tmp->prev;

		}

		if (!tmp) {
			// Packet goes at the beginning of the list
			p->next = stream->head[direction];
			if (p->next)
				p->next->prev = p;
			else
				stream->tail[direction] = p;
			stream->head[direction] = p;

		} else {
			// Insert the packet after the current one
			p->next = tmp->next;
			p->prev = tmp;

			if (p->next)
				p->next->prev = p;
			else
				stream->tail[direction] = p;

			tmp->next = p;

		}
	}
	
	stream->cur_buff_size += cur_stack->plen;

	
	if (stream->cur_buff_size >= stream->max_buff_size) {
		// Buffer overflow
		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : buffer overflow, forced dequeue", pthread_self(), stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
		if (packet_stream_force_dequeue(stream) != POM_OK) {
			packet_stream_end_process_packet(stream);
			return POM_ERR;
		}

		if (stream->t)
			conntrack_timer_dequeue(stream->t);
	}

	// Add timeout
	if (stream->t && (stream->head[POM_DIR_FWD] || stream->head[POM_DIR_REV])) 
		conntrack_timer_queue(stream->t, stream->same_dir_timeout);
	packet_stream_end_process_packet(stream);

	debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : done queued", pthread_self(),  stream, pkt->ts.tv_sec, pkt->ts.tv_usec, seq, ack);
	return PROTO_OK;
}
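
The insertion loop above walks the queue backwards until it finds a packet that comes before the new one, comparing 32-bit sequence numbers with wrap-around in mind: the difference is interpreted modulo 2^32 and PACKET_HALF_SEQ decides which side of the wrap a value falls on. Written as a standalone helper (the constant's value is an assumption, presumably half of the 32-bit space):

#include <stdint.h>

#define PACKET_HALF_SEQ 0x80000000u /* assumed: half of the 32-bit sequence space */

/* Returns 1 if sequence a is at or after sequence b, taking 32-bit wrap-around
 * into account; this is the test used in the queueing loop above with
 * a = tmp->seq and b = seq. */
static int seq_at_or_after(uint32_t a, uint32_t b) {
	return (a >= b && a - b < PACKET_HALF_SEQ) ||
	       (a <= b && b - a > PACKET_HALF_SEQ);
}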
Code Example #29
File: timer.c (project: Astalaseven/pom-ng)
int timers_process() {


	static int processing = 0;

	int res = pthread_mutex_trylock(&timer_main_lock);
	if (res == EBUSY) {
		// Already locked, give up
		return POM_OK;
	} else if (res) {
		// Something went wrong
		pomlog(POMLOG_ERR "Error while trying to lock the main timer lock : %s", pom_strerror(res));
		abort();
		return POM_ERR;
	}

	// Another thread is already processing the timers, drop out
	if (processing) {
		pom_mutex_unlock(&timer_main_lock);
		return POM_OK;
	}

	processing = 1;

	ptime now = core_get_clock();

	struct timer_queue *tq;
	tq = timer_queues;

	while (tq) {
		while (tq->head && (tq->head->expires < now)) {
				
			// Dequeue the timer
			struct timer *tmp = tq->head;
			tq->head = tq->head->next;
			if (tq->head)
				tq->head->prev = NULL;
			else
				tq->tail = NULL;

			tmp->next = NULL;
			tmp->prev = NULL;
			tmp->queue = NULL;
			pom_mutex_unlock(&timer_main_lock);
			registry_perf_dec(perf_timer_queued, 1);

			// Process it
			debug_timer( "Timer 0x%lx reached. Starting handler ...", (unsigned long) tmp);
			if ((*tmp->handler) (tmp->priv, now) != POM_OK) {
				return POM_ERR;
			}

			registry_perf_inc(perf_timer_processed, 1);

			pom_mutex_lock(&timer_main_lock);

		}
		tq = tq->next;

	}

	processing = 0;

	pom_mutex_unlock(&timer_main_lock);

	return POM_OK;
}
Code Example #30
File: conntrack.c (project: elfixit/pom-ng)
int conntrack_cleanup(struct conntrack_tables *ct, uint32_t hash, struct conntrack_entry *ce) {

	// Remove the conntrack from the conntrack table
	pom_mutex_lock(&ct->locks[hash]);

	// Try to find the conntrack in the list
	struct conntrack_list *lst = NULL;

	for (lst = ct->table[hash]; lst && lst->ce != ce; lst = lst->next);

	if (!lst) {
		pom_mutex_unlock(&ct->locks[hash]);
		pomlog(POMLOG_ERR "Trying to cleanup a non existing conntrack : %p", ce);
		return POM_OK;
	}

	conntrack_lock(ce);
	if (ce->refcount) {
		debug_conntrack(POMLOG_ERR "Conntrack %p is still being referenced : %u !", ce, ce->refcount);
		conntrack_delayed_cleanup(ce, 1, core_get_clock_last());
		conntrack_unlock(ce);
		pom_mutex_unlock(&ct->locks[hash]);
		return POM_OK;
	}


	if (lst->prev)
		lst->prev->next = lst->next;
	else
		ct->table[hash] = lst->next;

	if (lst->next)
		lst->next->prev = lst->prev;

	free(lst);

	pom_mutex_unlock(&ct->locks[hash]);

	if (ce->cleanup_timer && ce->cleanup_timer != (void *) -1) {
		conntrack_timer_cleanup(ce->cleanup_timer);
		ce->cleanup_timer = (void *) -1; // Mark that the conntrack is being cleaned up
	}

	// Once the conntrack is removed from the hash table, it will not be referenced ever again
	conntrack_unlock(ce);

	if (ce->parent) {
		debug_conntrack("Cleaning up conntrack %p, with parent %p", ce, ce->parent->ce);
	} else {
		debug_conntrack("Cleaning up conntrack %p, with no parent", ce);
	}

	
	if (ce->parent) {
		// Remove the child from the parent
		
		// Make sure the parent still exists
		uint32_t hash = ce->parent->hash;
		pom_mutex_lock(&ce->parent->ct->locks[hash]);
		
		for (lst = ce->parent->ct->table[hash]; lst && lst->ce != ce->parent->ce; lst = lst->next);

		if (lst) {

			conntrack_lock(ce->parent->ce);
			struct conntrack_node_list *tmp = ce->parent->ce->children;

			for (; tmp && tmp->ce != ce; tmp = tmp->next);

			if (tmp) {
				if (tmp->prev)
					tmp->prev->next = tmp->next;
				else
					ce->parent->ce->children = tmp->next;

				if (tmp->next)
					tmp->next->prev = tmp->prev;

				free(tmp);
			} else {
				pomlog(POMLOG_WARN "Conntrack %s not found in parent's %s children list", ce, ce->parent->ce);
			}

			if (!ce->parent->ce->children) // Parent has no child anymore, clean it up after some time
				conntrack_delayed_cleanup(ce->parent->ce, CONNTRACK_CHILDLESS_TIMEOUT, core_get_clock_last());

			conntrack_unlock(ce->parent->ce);
		} else {
			debug_conntrack("Parent conntrack %p not found while cleaning child %p !", ce->parent->ce, ce);
		}

		pom_mutex_unlock(&ce->parent->ct->locks[hash]);

		free(ce->parent);
	}

	if (ce->session)
		conntrack_session_refcount_dec(ce->session);

	// Cleanup private stuff from the conntrack
	if (ce->priv && ce->proto->info->ct_info->cleanup_handler) {
		if (ce->proto->info->ct_info->cleanup_handler(ce->priv) != POM_OK)
			pomlog(POMLOG_WARN "Unable to free the private memory of a conntrack");
	}

	// Cleanup the priv_list
	struct conntrack_priv_list *priv_lst = ce->priv_list;
	while (priv_lst) {
		if (priv_lst->cleanup) {
			if (priv_lst->cleanup(priv_lst->obj, priv_lst->priv) != POM_OK)
				pomlog(POMLOG_WARN "Error while cleaning up private objects in conntrack_entry");
		}
		ce->priv_list = priv_lst->next;
		free(priv_lst);
		priv_lst = ce->priv_list;

	}


	// Cleanup the children
	while (ce->children) {
		struct conntrack_node_list *child = ce->children;
		ce->children = child->next;

		if (conntrack_cleanup(child->ct, child->hash, child->ce) != POM_OK) 
			return POM_ERR;

		free(child);
	}

	
	if (ce->fwd_value)
		ptype_cleanup(ce->fwd_value);
	if (ce->rev_value)
		ptype_cleanup(ce->rev_value);

	pthread_mutex_destroy(&ce->lock);

	registry_perf_dec(ce->proto->perf_conn_cur, 1);

	free(ce);

	return POM_OK;
}