enum core_state core_get_state() { pom_mutex_lock(&core_state_lock); enum core_state state = core_cur_state; pom_mutex_unlock(&core_state_lock); return state; }
int input_instance_remove(struct registry_instance *ri) { struct input *i = ri->priv; pom_mutex_lock(&i->lock); int running = i->running; pom_mutex_unlock(&i->lock); if (running && registry_set_param(i->reg_instance, "running", "0") != POM_OK) { return POM_ERR; } if (i->reg->info->cleanup) { if (i->reg->info->cleanup(i) != POM_OK) { pomlog(POMLOG_ERR "Error while cleaning up input"); return POM_ERR; } } pthread_mutex_destroy(&i->lock); free(i->name); if (i->prev) i->prev->next = i->next; else input_head = i->next; if (i->next) i->next->prev = i->prev; free(i); return POM_OK; }
// Timer callback for conntrack timers.
// Verifies under the table lock that the conntrack entry the timer refers to
// still exists before invoking the user handler.
// NOTE: the handler is called with the conntrack entry locked and is
// responsible for unlocking it.
int conntrack_timer_process(void *priv, ptime now) {

	struct conntrack_timer *t = priv;
	struct conntrack_tables *ct = t->proto->ct;

	// Lock the main table
	pom_mutex_lock(&ct->locks[t->hash]);

	// Check if the conntrack still exists
	struct conntrack_list *lst = NULL;
	for (lst = ct->table[t->hash]; lst && lst->ce != t->ce; lst = lst->next);

	if (!lst) {
		// Entry was cleaned up between timer expiry and processing
		pomlog(POMLOG_DEBUG "Timer fired but conntrack doesn't exists anymore");
		pom_mutex_unlock(&ct->locks[t->hash]);
		return POM_OK;
	}

	// Save the reference to the conntrack as the timer might get cleaned up
	struct conntrack_entry *ce = t->ce;

	// The handler will unlock the conntrack
	conntrack_lock(ce);
	pom_mutex_unlock(&ct->locks[t->hash]);

	int res = t->handler(ce, t->priv, now);

	return res;
}
int conntrack_session_refcount_dec(struct conntrack_session *session) { pom_mutex_lock(&session->lock); session->refcount--; if (session->refcount) { pom_mutex_unlock(&session->lock); return POM_OK; } pom_mutex_unlock(&session->lock); pthread_mutex_destroy(&session->lock); while (session->privs) { struct conntrack_priv_list *lst = session->privs; session->privs = lst->next; if (lst->cleanup) { if (lst->cleanup(lst->obj, lst->priv) != POM_OK) pomlog(POMLOG_WARN "Cleanup handler failed for session priv"); } free(lst); } free(session); return POM_OK; }
int mod_load_all() { char *path = getenv(MOD_LIBDIR_ENV_VAR); if (!path) path = POM_LIBDIR; DIR *d; d = opendir(path); if (!d) { pomlog(POMLOG_ERR "Could not open directory %s for browsing : %s", path, pom_strerror(errno)); return POM_ERR; } struct dirent tmp, *dp; while (1) { if (readdir_r(d, &tmp, &dp) < 0) { pomlog(POMLOG_ERR "Error while reading directory entry : %s", pom_strerror(errno)); closedir(d); return POM_ERR; } if (!dp) // EOF break; size_t len = strlen(dp->d_name); if (len < strlen(POM_LIB_EXT) + 1) continue; if (!strcmp(dp->d_name + strlen(dp->d_name) - strlen(POM_LIB_EXT), POM_LIB_EXT)) { char *name = strdup(dp->d_name); if (!name) { pom_oom(strlen(dp->d_name)); closedir(d); return POM_ERR; } *(name + strlen(dp->d_name) - strlen(POM_LIB_EXT)) = 0; // Check if a dependency already loaded this module pom_mutex_lock(&mod_reg_lock); struct mod_reg *tmp; for (tmp = mod_reg_head; tmp && strcmp(name, tmp->name); tmp = tmp->next); if (tmp) { pom_mutex_unlock(&mod_reg_lock); free(name); continue; } pom_mutex_unlock(&mod_reg_lock); mod_load(name); free(name); } } closedir(d); return POM_OK; }
// Wake up every thread blocked on the serial condition so they can exit
// during shutdown.
int xmlrpccmd_cleanup() {

	pom_mutex_lock(&xmlrpccmd_serial_lock);
	pthread_cond_broadcast(&xmlrpccmd_serial_cond);
	pom_mutex_unlock(&xmlrpccmd_serial_lock);

	return POM_OK;
}
// Find a loaded module by name. Returns NULL when no module matches.
struct mod_reg *mod_get_by_name(char *name) {

	pom_mutex_lock(&mod_reg_lock);
	struct mod_reg *mod = mod_reg_head;
	while (mod && strcmp(mod->name, name))
		mod = mod->next;
	pom_mutex_unlock(&mod_reg_lock);

	return mod;
}
// Release one reference on a conntrack entry.
// Aborts the process on refcount underflow, which would indicate a
// use-after-release bug elsewhere.
void conntrack_refcount_dec(struct conntrack_entry *ce) {

	pom_mutex_lock(&ce->lock);

	if (!ce->refcount) {
		pomlog(POMLOG_ERR "Reference count already 0 !");
		abort();
	}
	ce->refcount--;

	pom_mutex_unlock(&ce->lock);
}
struct packet_info *packet_info_pool_get(struct proto *p) { struct packet_info *info = NULL; pom_mutex_lock(&p->pkt_info_pool.lock); if (!p->pkt_info_pool.unused) { // Allocate new packet_info info = malloc(sizeof(struct packet_info)); if (!info) { pom_mutex_unlock(&p->pkt_info_pool.lock); pom_oom(sizeof(struct packet_info)); return NULL; } memset(info, 0, sizeof(struct packet_info)); struct proto_pkt_field *fields = p->info->pkt_fields; int i; for (i = 0; fields[i].name; i++); info->fields_value = malloc(sizeof(struct ptype*) * (i + 1)); memset(info->fields_value, 0, sizeof(struct ptype*) * (i + 1)); for (; i--; ){ info->fields_value[i] = ptype_alloc_from_type(fields[i].value_type); if (!info->fields_value[i]) { i++; for (; fields[i].name; i++) ptype_cleanup(info->fields_value[i]); free(info); pom_mutex_unlock(&p->pkt_info_pool.lock); return NULL; } } debug_info_pool("Allocated info %p for proto %s", info, p->info->name); } else { // Dequeue the packet_info from the unused pool info = p->pkt_info_pool.unused; p->pkt_info_pool.unused = info->pool_next; if (p->pkt_info_pool.unused) p->pkt_info_pool.unused->pool_prev = NULL; debug_info_pool("Used info %p for proto %s", info, p->info->name); } // Queue the packet_info in the used pool info->pool_prev = NULL; info->pool_next = p->pkt_info_pool.used; if (info->pool_next) info->pool_next->pool_prev = info; p->pkt_info_pool.used = info; pom_mutex_unlock(&p->pkt_info_pool.lock); return info; }
// Drop one reference on a module. Safe to call with a NULL module.
void mod_refcount_dec(struct mod_reg *mod) {

	if (!mod)
		return;

	pom_mutex_lock(&mod->lock);
	mod->refcount--;
	pom_mutex_unlock(&mod->lock);
}
void xmlrcpcmd_serial_inc() { pom_mutex_lock(&xmlrpccmd_serial_lock); xmlrpccmd_serial++; if (pthread_cond_broadcast(&xmlrpccmd_serial_cond)) { pomlog(POMLOG_ERR "Error while signaling the serial condition. Aborting"); abort(); } pom_mutex_unlock(&xmlrpccmd_serial_lock); }
static void packet_stream_end_process_packet(struct packet_stream *stream) { pom_mutex_unlock(&stream->lock); pom_mutex_lock(&stream->wait_lock); if (stream->wait_list_head) { debug_stream("thread %p, entry %p : signaling thread %p", pthread_self(), stream, stream->wait_list_head->thread); pthread_cond_broadcast(&stream->wait_list_head->cond); } pom_mutex_unlock(&stream->wait_lock); }
// Process all expired timers across every timer queue.
// The main timer lock is dropped while each handler runs, so a handler may
// requeue its timer or modify the queues; the timer is fully unlinked before
// the lock is released.
int timers_process() {

	struct timeval now;
	core_get_clock(&now);

	pom_mutex_lock(&timer_main_lock);

	struct timer_queue *tq;
	tq = timer_queues;

	while (tq) {
		while (tq->head && timercmp(&tq->head->expires, &now, <)) {

			// Dequeue the timer
			struct timer *tmp = tq->head;
			tq->head = tq->head->next;
			if (tq->head)
				tq->head->prev = NULL;
			else
				tq->tail = NULL;

			// Detach it completely before dropping the lock so the
			// handler can safely requeue it
			tmp->next = NULL;
			tmp->prev = NULL;
			tmp->queue = NULL;
			pom_mutex_unlock(&timer_main_lock);

			// Process it
			debug_timer( "Timer 0x%lx reached. Starting handler ...", (unsigned long) tmp);
			if ((*tmp->handler) (tmp->priv, &now) != POM_OK) {
				// NOTE(review): returns with the lock already released
				return POM_ERR;
			}

			pom_mutex_lock(&timer_main_lock);

		}
		tq = tq->next;

	}

	pom_mutex_unlock(&timer_main_lock);

	return POM_OK;
}
// Release one reference on a packet. When the last reference is dropped the
// packet is unlinked from the used list, its buffer is released and it is
// returned to the unused pool (or freed in debug builds).
// The packet's multipart, if any, is always cleaned up regardless of the
// remaining refcount.
int packet_pool_release(struct packet *p) {

	struct packet_multipart *multipart = NULL;

	pom_mutex_lock(&packet_list_mutex);

	// Detach the multipart so it is cleaned up exactly once
	if (p->multipart) {
		multipart = p->multipart;
		p->multipart = NULL;
	}

	p->refcount--;
	if (p->refcount) {
		pom_mutex_unlock(&packet_list_mutex);
		if (multipart) // Always release the multipart
			return packet_multipart_cleanup(multipart);
		return POM_OK;
	}

	// Remove the packet from the used list
	if (p->next)
		p->next->prev = p->prev;
	if (p->prev)
		p->prev->next = p->next;
	else
		packet_head = p->next;

	if (p->pkt_buff) {
		packet_buffer_pool_release(p->pkt_buff);
		p->pkt_buff = NULL;
		p->buff = NULL;
	}

	// Wipe the struct before recycling it
	memset(p, 0, sizeof(struct packet));

	// Add it back to the unused list
#ifdef PACKET_INFO_POOL_ALLOC_DEBUG
	// Debug builds free instead of pooling so ASAN/valgrind can catch
	// use-after-release
	free(p);
#else
	p->next = packet_unused_head;
	if (p->next)
		p->next->prev = p;
	packet_unused_head = p;
#endif

	pom_mutex_unlock(&packet_list_mutex);

	int res = POM_OK;
	if (multipart)
		res = packet_multipart_cleanup(multipart);

	return res;
}
void core_wait_state(enum core_state state) { pom_mutex_lock(&core_state_lock); while (core_cur_state != state) { if (pthread_cond_wait(&core_state_cond, &core_state_lock)) { pomlog(POMLOG_ERR "Error while waiting for core cond : %s", pom_strerror(errno)); abort(); break; } } pom_mutex_unlock(&core_state_lock); }
// Timer callback for a cable modem : reset its registration status to
// unknown under the analyzer lock.
static int analyzer_docsis_cm_timeout(void *cable_modem, ptime now) {

	struct analyzer_docsis_cm *cm = cable_modem;
	struct analyzer_docsis_priv *priv = cm->analyzer->priv;

	pom_mutex_lock(&priv->lock);
	analyzer_docsis_reg_status_update(priv, cm, docsis_mmt_rng_status_unknown, now, NULL, 0);
	pom_mutex_unlock(&priv->lock);

	return POM_OK;
}
// Conntrack timeout handler : force-dequeue whatever is pending on the
// stream. The conntrack entry argument is unused here.
int packet_stream_timeout(struct conntrack_entry *ce, void *priv) {

	struct packet_stream *stream = priv;

	pom_mutex_lock(&stream->lock);
	int res = packet_stream_force_dequeue(stream);
	pom_mutex_unlock(&stream->lock);

	return res;
}
// Finish processing a packet on a stream : schedule the conntrack's delayed
// cleanup, release the stream lock (held by the caller) and wake the first
// waiting thread, if any.
static void stream_end_process_packet(struct stream *stream) {

	conntrack_delayed_cleanup(stream->ce, stream->timeout, stream->last_ts);
	pom_mutex_unlock(&stream->lock);

	pom_mutex_lock(&stream->wait_lock);
	if (stream->wait_list_head) {
		debug_stream("thread %p, entry %p : signaling thread %p", pthread_self(), stream, stream->wait_list_head->thread);
		pthread_cond_broadcast(&stream->wait_list_head->cond);
	}
	pom_mutex_unlock(&stream->wait_lock);
}
int core_process_dump_info(struct proto_process_stack *s, struct packet *p, int res) { char *res_str = "unknown result code"; switch (res) { case PROTO_OK: res_str = "processed ok"; break; case PROTO_INVALID: res_str = "invalid packet"; break; case PROTO_STOP: res_str = "processing stopped"; break; case PROTO_ERR: res_str = "processing encountered an error"; break; } static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER; pom_mutex_lock(&debug_lock); printf("thread %u | %u.%u | ", (unsigned int)pthread_self(), (int)pom_ptime_sec(p->ts), (int)pom_ptime_usec(p->ts)); // Dump packet info int i; for (i = 1; i < CORE_PROTO_STACK_MAX - 1 && s[i].proto; i++) { printf("%s { ", s[i].proto->info->name); char buff[256]; if (s[i].pkt_info) { if (s[i].proto->info->pkt_fields) { int j; for (j = 0; s[i].proto->info->pkt_fields[j].name; j++) { ptype_print_val(s[i].pkt_info->fields_value[j], buff, sizeof(buff) - 1, NULL); printf("%s: %s; ", s[i].proto->info->pkt_fields[j].name, buff); } } } else { printf("pkt_info missing "); } printf("}; "); } printf(": %s\n", res_str); pom_mutex_unlock(&debug_lock); return POM_OK; }
// Call the addon instance's close() method under the instance lock.
// Returns the result of the protected Lua call.
int addon_output_close(void *output_priv) {

	struct addon_instance_priv *p = output_priv;

	pom_mutex_lock(&p->lock);

	lua_getfield(p->L, LUA_REGISTRYINDEX, ADDON_INSTANCE); // Stack : self
	lua_getfield(p->L, -1, "close");                       // Stack : self, close_func
	lua_pushvalue(p->L, -2);                               // Stack : self, close_func, self
	int status = addon_pcall(p->L, 1, 0);                  // Stack : self
	lua_pop(p->L, 1);                                      // Stack : empty

	pom_mutex_unlock(&p->lock);

	return status;
}
int timer_sys_cleanup(struct timer_sys *t) { pom_mutex_lock(&timer_sys_lock); if (t->prev || t->next || timer_sys_head == t) { if (t->prev) t->prev->next = t->next; else timer_sys_head = t->next; if (t->next) t->next->prev = t->prev; else timer_sys_tail = t->prev; } pom_mutex_unlock(&timer_sys_lock); free(t); return POM_OK; }
int timer_sys_dequeue(struct timer_sys *t) { pom_mutex_lock(&timer_sys_lock); if (t->prev || t->next || timer_sys_head == t) { if (t->prev) t->prev->next = t->next; else timer_sys_head = t->next; if (t->next) t->next->prev = t->prev; else timer_sys_tail = t->prev; t->prev = NULL; t->next = NULL; } pom_mutex_unlock(&timer_sys_lock); return POM_OK; }
void core_get_clock(struct timeval *now) { pom_mutex_lock(&core_clock_lock); memcpy(now, &core_clock[0], sizeof(struct timeval)); // Take only the least recent time int i; for (i = 1; i < core_num_threads; i++) { if ((now->tv_sec > core_clock[i].tv_sec) || ((now->tv_sec == core_clock[i].tv_sec) && (now->tv_usec > core_clock[i].tv_sec))) { memcpy(now, &core_clock, sizeof(struct timeval)); } } pom_mutex_unlock(&core_clock_lock); }
int timer_dequeue(struct timer *t) { // First let's check if it's the one at the begining of the queue pom_mutex_lock(&timer_main_lock); if (!t->queue) { pomlog(POMLOG_WARN "Warning, timer %p was already dequeued", t); pom_mutex_unlock(&timer_main_lock); return POM_OK; } if (t->prev) { t->prev->next = t->next; } else { t->queue->head = t->next; if (t->queue->head) t->queue->head->prev = NULL; } if (t->next) { t->next->prev = t->prev; } else { t->queue->tail = t->prev; if (t->queue->tail) t->queue->tail->next = NULL; } // Make sure this timer will not reference anything t->prev = NULL; t->next = NULL; t->queue = NULL; pom_mutex_unlock(&timer_main_lock); registry_perf_dec(perf_timer_queued, 1); return POM_OK; }
// Reset a performance counter. Gauges are not resettable; timeticks
// counters that are running restart from the current time.
void registry_perf_reset(struct registry_perf *p) {

	if (p->type == registry_perf_type_gauge)
		return;

	if (p->update_hook) {
		pom_mutex_lock(&p->hook_lock);
		p->value = 0;
		pom_mutex_unlock(&p->hook_lock);
	}
	// NOTE(review): when update_hook is set, execution falls through and
	// p->value is written again below WITHOUT holding hook_lock — confirm
	// whether a return (or a wider lock scope) was intended here.

	if (p->type == registry_perf_type_timeticks) {
		// REGISTRY_PERF_TIMETICKS_STARTED is used as a flag bit here
		uint64_t running = p->value & REGISTRY_PERF_TIMETICKS_STARTED;
		if (running) {
			// Restart the tick base at the current time, keeping the
			// started flag set.
			// NOTE(review): uses '+' rather than '|' to set the flag —
			// equivalent only if the flag bit is never set in
			// pom_gettimeofday(); verify.
			p->value = pom_gettimeofday() + REGISTRY_PERF_TIMETICKS_STARTED;
		} else {
			p->value = 0;
		}
	} else {
		p->value = 0;
	}
}
// XML-RPC handler : block until the main serial differs from the one the
// client last saw, then return the current main, registry and log serials.
xmlrpc_value *xmlrpccmd_core_serial_poll(xmlrpc_env * const envP, xmlrpc_value * const paramArrayP, void * const userData) {

	uint32_t last_serial = 0;
	xmlrpc_decompose_value(envP, paramArrayP, "(i)", &last_serial);
	if (envP->fault_occurred)
		return NULL;

	pom_mutex_lock(&xmlrpccmd_serial_lock);
	if (last_serial == xmlrpccmd_serial) {
		// Wait for update
		// FIX: pthread_cond_wait() returns the error number and does not
		// set errno, so report its return value. Also dropped the
		// unreachable return after abort().
		int err = pthread_cond_wait(&xmlrpccmd_serial_cond, &xmlrpccmd_serial_lock);
		if (err) {
			xmlrpc_faultf(envP, "Error while waiting for serial condition : %s", pom_strerror(err));
			abort();
		}
	}
	last_serial = xmlrpccmd_serial;
	pom_mutex_unlock(&xmlrpccmd_serial_lock);

	registry_lock();
	pomlog_rlock();

	struct pomlog_entry *last_log = pomlog_get_tail();

	xmlrpc_value *res = xmlrpc_build_value(envP, "{s:i,s:i,s:i}", "main", last_serial, "registry", registry_serial_get(), "log", last_log->id);

	pomlog_unlock();
	registry_unlock();

	return res;
}
int output_cleanup() { pom_mutex_lock(&output_lock); if (output_registry_class) registry_remove_class(output_registry_class); output_registry_class = NULL; while (output_reg_head) { struct output_reg *tmp = output_reg_head; output_reg_head = tmp->next; mod_refcount_dec(tmp->reg_info->mod); free(tmp); } pom_mutex_unlock(&output_lock); return POM_OK; }
struct conntrack_session *conntrack_session_get(struct conntrack_entry *ce) { if (!ce->session) { ce->session = malloc(sizeof(struct conntrack_session)); if (!ce->session) { pom_oom(sizeof(struct conntrack_session)); return NULL; } memset(ce->session, 0, sizeof(struct conntrack_session)); if (pthread_mutex_init(&ce->session->lock, NULL)) { pomlog(POMLOG_ERR "Error while initializing session mutex : %s", pom_strerror(errno)); free(ce->session); ce->session = NULL; return NULL; } ce->session->refcount++; } pom_mutex_lock(&ce->session->lock); return ce->session; }
// Record the initial sequence number for one direction of a stream, then
// process any queued packets that become deliverable as a result.
// Ignored once the stream has started running.
// Returns POM_ERR if the stream handler fails on a queued packet.
int stream_set_start_seq(struct stream *stream, unsigned int direction, uint32_t seq) {

	pom_mutex_lock(&stream->lock);

	if (stream->flags & STREAM_FLAG_RUNNING) {
		// Too late : packets were already delivered, the start sequence
		// cannot be changed anymore
		debug_stream("thread %p, entry %p : not accepting additional sequence update as the stream stared", pthread_self(), stream);
		// stream_end_process_packet() releases stream->lock
		stream_end_process_packet(stream);
		return POM_OK;
	}

	// Mark this direction's start sequence as known
	int dir_flag = (direction == POM_DIR_FWD ? STREAM_FLAG_GOT_FWD_STARTSEQ : STREAM_FLAG_GOT_REV_STARTSEQ);
	stream->flags |= dir_flag;
	stream->cur_seq[direction] = seq;

	debug_stream("thread %p, entry %p : start_seq for direction %u set to %u", pthread_self(), stream, direction, seq);

	// Deliver any queued packets that are now in sequence
	struct stream_pkt *p = NULL;
	while ((p = stream_get_next(stream, &direction))) {

		debug_stream("thread %p, entry %p, packet %u.%06u, seq %u, ack %u : process additional", pthread_self(), stream, pom_ptime_sec(p->pkt->ts), pom_ptime_usec(p->pkt->ts), p->seq, p->ack);

		// Flag the stream as running
		stream->flags |= STREAM_FLAG_RUNNING;

		if (stream->handler(stream->ce, p->pkt, p->stack, p->stack_index) == PROTO_ERR) {
			// stream_end_process_packet() releases stream->lock
			stream_end_process_packet(stream);
			return POM_ERR;
		}

		stream->cur_seq[direction] += p->plen;

		stream_free_packet(p);
	}

	// stream_end_process_packet() releases stream->lock
	stream_end_process_packet(stream);

	return POM_OK;
}
struct packet *packet_pool_get() { pom_mutex_lock(&packet_list_mutex); struct packet *tmp = packet_unused_head; if (!tmp) { // Alloc a new packet tmp = malloc(sizeof(struct packet)); if (!tmp) { pom_mutex_unlock(&packet_list_mutex); pom_oom(sizeof(struct packet)); return NULL; } } else { // Fetch it from the unused pool packet_unused_head = tmp->next; if (packet_unused_head) packet_unused_head->prev = NULL; } memset(tmp, 0, sizeof(struct packet)); // Add the packet to the used pool tmp->next = packet_head; if (tmp->next) tmp->next->prev = tmp; packet_head = tmp; tmp->refcount = 1; pom_mutex_unlock(&packet_list_mutex); return tmp; }