static void _delete_event_connection_fd(int fd) { struct cerebrod_event_connection_data *ecd; ListIterator eitr; #if CEREBRO_DEBUG int rv; #endif /* CEREBRO_DEBUG */ assert(fd >= 0); #if CEREBRO_DEBUG /* Should be called with lock already set */ rv = Pthread_mutex_trylock(&event_connections_lock); if (rv != EBUSY) CEREBROD_EXIT(("mutex not locked: rv=%d", rv)); #endif /* CEREBRO_DEBUG */ eitr = List_iterator_create(event_connections); while ((ecd = list_next(eitr))) { if (ecd->fd == fd) { List connections; if ((connections = Hash_find(event_connections_index, ecd->event_name))) { ListIterator citr; int *fdPtr; citr = List_iterator_create(connections); while ((fdPtr = list_next(citr))) { if (*fdPtr == fd) { List_delete(citr); break; } } List_iterator_destroy(citr); } List_delete(eitr); break; } } List_iterator_destroy(eitr); }
/*
 * _hostrange_output
 *
 * Output metric data in hostrange format.  The stringified
 * metric_value serves as a hash key; each hash entry collects the
 * hosts that share that metric value.
 */
static void
_hostrange_output(List l)
{
#if CEREBRO_DEBUG
  const char *func = __FUNCTION__;
#endif /* CEREBRO_DEBUG */
  struct node_metric_data *nmd = NULL;
  ListIterator itr = NULL;
  unsigned int count;
  hash_t h;

  assert(l);

  count = List_count(l);
#if CEREBRO_DEBUG
  if (!count)
    err_exit("%s: invalid count", func);
#endif /* CEREBRO_DEBUG */

  h = Hash_create(count,
                  (hash_key_f)hash_key_string,
                  (hash_cmp_f)strcmp,
                  (hash_del_f)_hostrange_data_destroy);

  itr = List_iterator_create(l);
  while ((nmd = list_next(itr)))
    {
      char keybuf[CEREBRO_STAT_BUFLEN];
      struct hostrange_data *hd;

      _metric_value_str(nmd->metric_value_type,
                        nmd->metric_value_len,
                        nmd->metric_value,
                        keybuf,
                        CEREBRO_STAT_BUFLEN);

      /* First host with this value: create a new bucket for it */
      if (!(hd = Hash_find(h, keybuf)))
        {
          hd = Malloc(sizeof(struct hostrange_data));
          hd->hl = Hostlist_create(NULL);
          hd->key = Strdup(keybuf);
          Hash_insert(h, hd->key, hd);
        }

      Hostlist_push(hd->hl, nmd->nodename);
    }

  Hash_for_each(h, _hostrange_output_data, NULL);

  /* No need to destroy list iterator, caller will destroy List */
  Hash_destroy(h);
}
/* * _metric_list * * Output list of all available metrics */ static void _metric_list(void) { const char *func = __FUNCTION__; cerebro_namelist_t m = NULL; cerebro_namelist_iterator_t mitr = NULL; List l = NULL; ListIterator litr = NULL; char *str; if (!(m = cerebro_get_metric_names(handle))) { char *msg = cerebro_strerror(cerebro_errnum(handle)); _clean_err_exit(cerebro_errnum(handle)); err_exit("%s: cerebro_get_metric_names: %s", func, msg); } if (!(mitr = cerebro_namelist_iterator_create(m))) { char *msg = cerebro_strerror(cerebro_namelist_errnum(m)); err_exit("%s: cerebro_namelist_iterator_create: %s", func, msg); } l = List_create(NULL); while (!cerebro_namelist_iterator_at_end(mitr)) { if (cerebro_namelist_iterator_name(mitr, &str) < 0) { char *msg = cerebro_strerror(cerebro_namelist_iterator_errnum(mitr)); err_exit("%s: cerebro_namelist_iterator_name: %s", func, msg); } List_append(l, str); if (cerebro_namelist_iterator_next(mitr) < 0) { char *msg = cerebro_strerror(cerebro_namelist_iterator_errnum(mitr)); err_exit("%s: cerebro_namelist_iterator_next: %s", func, msg); } } litr = List_iterator_create(l); while ((str = list_next(litr))) fprintf(stdout, "%s\n", str); /* List_destroy() and cerebro_namelist_destory() destroy iterators too */ List_destroy(l); (void)cerebro_namelist_destroy(m); }
/* * cerebrod_monitor_modules_update * * Send metric data to the appropriate monitor modules, if necessary. * The struct cerebrod_node_data lock should already be locked. */ void cerebrod_monitor_modules_update(const char *nodename, struct cerebrod_node_data *nd, const char *metric_name, struct cerebrod_message_metric *mm) { struct cerebrod_monitor_module_info *monitor_module; struct cerebrod_monitor_module_list *ml; #if CEREBRO_DEBUG int rv; #endif /* CEREBRO_DEBUG */ assert(nodename && nd && metric_name && mm); if (!monitor_index) return; #if CEREBRO_DEBUG /* Should be called with lock already set */ rv = Pthread_mutex_trylock(&nd->node_data_lock); if (rv != EBUSY) CEREBROD_EXIT(("mutex not locked: rv=%d", rv)); #endif /* CEREBRO_DEBUG */ if ((ml = Hash_find(monitor_index, metric_name))) { ListIterator itr = NULL; Pthread_mutex_lock(&(ml->monitor_list_lock)); itr = List_iterator_create(ml->monitor_list); Pthread_mutex_unlock(&(ml->monitor_list_lock)); while ((monitor_module = list_next(itr))) { Pthread_mutex_lock(&monitor_module->monitor_lock); monitor_module_metric_update(monitor_handle, monitor_module->index, nodename, metric_name, mm->metric_value_type, mm->metric_value_len, mm->metric_value); Pthread_mutex_unlock(&monitor_module->monitor_lock); } Pthread_mutex_lock(&(ml->monitor_list_lock)); List_iterator_destroy(itr); Pthread_mutex_unlock(&(ml->monitor_list_lock)); } }
/*
 * _newline_output
 *
 * Output metric data, one node per line.  If
 * metric_received_time_flag is set, the time the metric value was
 * received is printed alongside each node's value (or
 * CEREBRO_STAT_UNKNOWN_STRING if no receive time is recorded).
 */
static void
_newline_output(List l)
{
  struct node_metric_data *data = NULL;
  ListIterator litr = NULL;

  assert(l);

  litr = List_iterator_create(l);
  while ((data = list_next(litr)))
    {
      char vbuf[CEREBRO_STAT_BUFLEN];

      memset(vbuf, '\0', CEREBRO_STAT_BUFLEN);
      _metric_value_str(data->metric_value_type,
                        data->metric_value_len,
                        data->metric_value,
                        vbuf,
                        CEREBRO_STAT_BUFLEN);

      if (metric_received_time_flag)
        {
          char tbuf[CEREBRO_STAT_BUFLEN];

          memset(tbuf, '\0', CEREBRO_STAT_BUFLEN);
          if (data->metric_value_received_time)
            {
              time_t t = (time_t)data->metric_value_received_time;
              struct tm *tm = Localtime(&t);

              strftime(tbuf, CEREBRO_STAT_BUFLEN, "%F %I:%M:%S%P", tm);
            }
          else
            /* Pass the macro as an argument rather than as the format
             * string, so a '%' inside it can never be interpreted as a
             * conversion specifier (CERT FIO30-C). */
            snprintf(tbuf, CEREBRO_STAT_BUFLEN, "%s", CEREBRO_STAT_UNKNOWN_STRING);

          fprintf(stdout, "%s(%s): %s\n", data->nodename, tbuf, vbuf);
        }
      else
        fprintf(stdout, "%s: %s\n", data->nodename, vbuf);
    }
  /* No need to destroy list iterator, caller will destroy List */
}
/*
 * _cluster_nodes_output
 *
 * Output cluster nodes, either one per line or as a collapsed
 * host range, depending on output_type.
 */
static void
_cluster_nodes_output(List l)
{
  struct node_metric_data *nmd = NULL;
  hostlist_t hl = NULL;
  ListIterator itr = NULL;

  assert(l);

  itr = List_iterator_create(l);

  if (output_type == CEREBRO_STAT_HOSTRANGE)
    hl = Hostlist_create(NULL);

  while ((nmd = list_next(itr)))
    {
      if (output_type == CEREBRO_STAT_NEWLINE)
        fprintf(stdout, "%s\n", nmd->nodename);
      else if (output_type == CEREBRO_STAT_HOSTRANGE)
        Hostlist_push(hl, nmd->nodename);
    }

  if (output_type == CEREBRO_STAT_HOSTRANGE)
    {
      char hstr[CEREBRO_STAT_BUFLEN];

      Hostlist_sort(hl);
      Hostlist_uniq(hl);

      memset(hstr, '\0', CEREBRO_STAT_BUFLEN);
      Hostlist_ranged_string(hl, CEREBRO_STAT_BUFLEN, hstr);
      fprintf(stdout, "%s\n", hstr);
    }

  /* No need to destroy list iterator, caller will destroy List */
  if (hl)
    Hostlist_destroy(hl);
}
/*
 * cerebrod_speaker_data_get_heartbeat_metric_data
 *
 * Fill 'msg' with the metric data that is due to be sent in the next
 * heartbeat, updating 'message_len' as metrics are added.  Metrics
 * whose next_call_time has not yet been reached are skipped (the list
 * is kept sorted by next_call_time, hence the 'break').  If adding a
 * metric would exceed CEREBRO_MAX_PACKET_LEN, '*more_data_to_send' is
 * set so the caller knows to send another message.
 */
void cerebrod_speaker_data_get_heartbeat_metric_data(struct cerebrod_message *msg,
                                                     unsigned int *message_len,
                                                     int *more_data_to_send)
{
  struct cerebrod_speaker_metric_info *metric_info;
  ListIterator itr = NULL;
  struct timeval tv;

  assert(msg && message_len && more_data_to_send);

  if (!speaker_data_init)
    CEREBRO_EXIT(("initialization not complete"));

  *more_data_to_send = 0;

#if !WITH_CEREBROD_NO_THREADS
  Pthread_mutex_lock(&metric_list_lock);
#endif /* !WITH_CEREBROD_NO_THREADS */

  /* There may not be any metrics to distribute */
  if (!metric_list_size)
    {
      msg->metrics_len = 0;
      msg->metrics = NULL;
      goto lock_out;
    }

  /* Allocate one extra slot so the array stays NULL-terminated */
  msg->metrics_len = 0;
  msg->metrics = Malloc(sizeof(struct cerebrod_message_metric *)*(metric_list_size + 1));
  memset(msg->metrics, '\0', sizeof(struct cerebrod_message_metric *)*(metric_list_size + 1));

  Gettimeofday(&tv, NULL);
  itr = List_iterator_create(metric_list);
  while ((metric_info = list_next(itr)))
    {
      struct cerebrod_message_metric *mm = NULL;

      /* List is sorted by next_call_time; nothing further is due yet */
      if (tv.tv_sec <= metric_info->next_call_time)
        break;

      /* Module metrics flagged SEND_ON_PERIOD are handled elsewhere */
      if (metric_info->metric_origin & CEREBROD_METRIC_SPEAKER_ORIGIN_MODULE
          && !(metric_info->metric_flags & CEREBRO_METRIC_MODULE_FLAGS_SEND_ON_PERIOD))
        mm = _get_module_metric_value(metric_info->index);

      if (metric_info->metric_origin & CEREBROD_METRIC_SPEAKER_ORIGIN_USERSPACE)
        mm = _get_userspace_metric_value(metric_info);

      if (mm)
        {
          unsigned int new_len;

          new_len = *message_len + CEREBROD_MESSAGE_METRIC_HEADER_LEN + mm->metric_value_len;
          if (new_len >= CEREBRO_MAX_PACKET_LEN)
            {
              /* NOTE(review): 'mm' obtained above appears to be
               * dropped here without being stored or freed — confirm
               * ownership semantics of _get_*_metric_value(); this
               * may leak when the packet is full. */
              *more_data_to_send = 1;
              goto sort_out;
            }
          else
            {
              *message_len += CEREBROD_MESSAGE_METRIC_HEADER_LEN;
              *message_len += mm->metric_value_len;
              msg->metrics[msg->metrics_len] = mm;
              msg->metrics_len++;
            }
        }

      if (metric_info->metric_origin & CEREBROD_METRIC_SPEAKER_ORIGIN_MODULE)
        {
          /* Negative period: call once, then never again */
          if (metric_info->metric_period < 0)
            metric_info->next_call_time = UINT_MAX;

          /*
           * Metric period stays at 0 for metrics that need to be
           * propogated every time
           */
          if (metric_info->metric_period > 0)
            metric_info->next_call_time
              = tv.tv_sec + metric_info->metric_period;
        }

      /* Userspace metrics are sent once per update */
      if (metric_info->metric_origin & CEREBROD_METRIC_SPEAKER_ORIGIN_USERSPACE)
        metric_info->next_call_time = UINT_MAX;
    }

 sort_out:
  List_iterator_destroy(itr);
  /* Re-sort so the next_call_time ordering invariant holds */
  cerebrod_speaker_data_metric_list_sort();
 lock_out:
#if !WITH_CEREBROD_NO_THREADS
  Pthread_mutex_unlock(&metric_list_lock);
#endif /* !WITH_CEREBROD_NO_THREADS */
  return;
}
void cerebrod_event_modules_update(const char *nodename, struct cerebrod_node_data *nd, const char *metric_name, struct cerebrod_message_metric *mm) { struct cerebrod_event_module_info *event_module; struct cerebrod_event_module_list *el; #if CEREBRO_DEBUG int rv; #endif /* CEREBRO_DEBUG */ assert(nodename && nd && metric_name && mm); if (!event_index) return; #if CEREBRO_DEBUG /* Should be called with lock already set */ rv = Pthread_mutex_trylock(&nd->node_data_lock); if (rv != EBUSY) CEREBROD_EXIT(("mutex not locked: rv=%d", rv)); #endif /* CEREBRO_DEBUG */ /* * This function may be called by multiple threads by the listener. * * The event_index is setup at the beginning and is only read, not * written to. However, the lists stored inside the event_index * need to called w/ thread safety (due to the nature of the list * API). * */ if ((el = Hash_find(event_index, metric_name))) { struct cerebro_event *event = NULL; ListIterator itr = NULL; int rv; Pthread_mutex_lock(&(el->event_list_lock)); itr = List_iterator_create(el->event_list); Pthread_mutex_unlock(&(el->event_list_lock)); while ((event_module = list_next(itr))) { Pthread_mutex_lock(&event_module->event_lock); if ((rv = event_module_metric_update(event_handle, event_module->index, nodename, metric_name, mm->metric_value_type, mm->metric_value_len, mm->metric_value, &event)) < 0) { CEREBROD_DBG(("event_module_metric_update")); goto loop_next; } if (rv && event) cerebrod_queue_event(event, event_module->index); loop_next: Pthread_mutex_unlock(&event_module->event_lock); } Pthread_mutex_lock(&(el->event_list_lock)); List_iterator_destroy(itr); Pthread_mutex_unlock(&(el->event_list_lock)); } }
/*
 * cerebrod_event_queue_monitor
 *
 * Thread that drains the event_queue: waits on event_queue_cond until
 * events are queued, copies them onto a private list, then writes the
 * marshalled form of each event to every client connection registered
 * for that event name.
 */
void *
cerebrod_event_queue_monitor(void *arg)
{
  List temp_event_queue;

  _event_queue_monitor_initialize();

  /* Don't bother if there isn't an event queue (i.e. no event modules) */
  if (!event_queue)
    return NULL;

  temp_event_queue = List_create((ListDelF)cerebrod_event_to_send_destroy);

  /*
   * achu: The listener and thus event update initialization is
   * started after this thread is started.  So the and event_index may
   * not be set up the first time this loop is reached.
   *
   * However, it must be set after the condition is signaled, b/c the
   * listener (and thus event update code) and event node timeout
   * thread begin after the listener is setup.
   *
   * Thus, we put the event_queue assert inside the loop.
   */
  for (;;)
    {
      struct cerebrod_event_to_send *ets;
      ListIterator eitr;
      ListIterator titr;

      Pthread_mutex_lock(&event_queue_lock);
      assert(event_queue);
      while (list_count(event_queue) == 0)
        Pthread_cond_wait(&event_queue_cond, &event_queue_lock);

      /* Debug dumping in the below loop can race with the debug
       * dumping from the listener, b/c of racing on the
       * event_queue_lock.  To avoid this race, we copy the data off
       * the event_queue, so the event_queue_lock can be freed up.
       */
      /* List_remove (not List_delete) so the items survive the move;
       * temp_event_queue's delete function frees them later. */
      eitr = List_iterator_create(event_queue);
      while ((ets = list_next(eitr)))
        {
          List_append(temp_event_queue, ets);
          List_remove(eitr);
        }
      List_iterator_destroy(eitr);
      Pthread_mutex_unlock(&event_queue_lock);

      titr = List_iterator_create(temp_event_queue);
      while ((ets = list_next(titr)))
        {
          List connections;

          _event_dump(ets->event);

          Pthread_mutex_lock(&event_connections_lock);
          if ((connections = Hash_find(event_connections_index, ets->event_name)))
            {
              char buf[CEREBRO_MAX_PACKET_LEN];
              int elen;

              if ((elen = _event_marshall(ets->event, buf, CEREBRO_MAX_PACKET_LEN)) > 0)
                {
                  ListIterator citr;
                  int *fd;

                  citr = List_iterator_create(connections);
                  while ((fd = list_next(citr)))
                    {
                      if (fd_write_n(*fd, buf, elen) < 0)
                        {
                          CEREBROD_DBG(("fd_write_n: %s", strerror(errno)));
                          /* Connection-fatal errnos: drop the fd from
                           * this event's connection list */
                          if (errno == EPIPE
                              || errno == EINVAL
                              || errno == EBADF
                              || errno == ENODEV
                              || errno == ENETDOWN
                              || errno == ENETUNREACH)
                            {
                              /* NOTE(review): the event server thread
                               * guards similar output with
                               * conf.debug && conf.event_server_debug —
                               * confirm whether conf.debug belongs
                               * here too. */
                              if (conf.event_server_debug)
                                {
                                  Pthread_mutex_lock(&debug_output_mutex);
                                  fprintf(stderr, "**************************************\n");
                                  fprintf(stderr, "* Event Connection Died: errno = %d\n", errno);
                                  fprintf(stderr, "**************************************\n");
                                  Pthread_mutex_unlock(&debug_output_mutex);
                                }
                              List_delete(citr);
                            }
                          continue;
                        }
                    }
                  List_iterator_destroy(citr);
                }
            }
          Pthread_mutex_unlock(&event_connections_lock);
          /* Done with this event; List_delete invokes the list's
           * delete function to free it */
          List_delete(titr);
        }
      List_iterator_destroy(titr);
    }

  List_destroy(temp_event_queue);
  return NULL;			/* NOT REACHED */
}
/*
 * cerebrod_event_server
 *
 * Thread that accepts new event client connections on the server
 * socket and uses poll() to detect client connections that have
 * errored or been closed, removing them from event_connections.
 */
void *
cerebrod_event_server(void *arg)
{
  int server_fd;

  _event_server_initialize();

  if ((server_fd = _event_server_setup_socket(0)) < 0)
    CEREBROD_EXIT(("event server fd setup failed"));

  for (;;)
    {
      ListIterator eitr;
      struct cerebrod_event_connection_data *ecd;
      struct pollfd *pfds;
      int pfdslen = 0;
      int i;

      /* Note that the list_count won't grow larger after the first
       * mutex block, b/c the cerebrod_event_queue_monitor thread can
       * never add to the event_connections.  It can only shrink it.
       */
      Pthread_mutex_lock(&event_connections_lock);
      if (event_connections)
        pfdslen = List_count(event_connections);
      Pthread_mutex_unlock(&event_connections_lock);

      /* The + 1 is b/c of the server_fd. */
      pfdslen++;

      pfds = Malloc(sizeof(struct pollfd) * pfdslen);
      memset(pfds, '\0', sizeof(struct pollfd) * pfdslen);

      /* Slot 0 is always the listening socket */
      pfds[0].fd = server_fd;
      pfds[0].events = POLLIN;
      pfds[0].revents = 0;

      /* No 'event_connections' if there are no events */
      if (event_connections)
        {
          i = 1;
          Pthread_mutex_lock(&event_connections_lock);
          eitr = List_iterator_create(event_connections);
          while ((ecd = list_next(eitr)))
            {
              pfds[i].fd = ecd->fd;
              pfds[i].events = POLLIN;
              pfds[i].revents = 0;
              i++;
            }
          List_iterator_destroy(eitr);
          Pthread_mutex_unlock(&event_connections_lock);
        }

      /* Block indefinitely until something is readable or errors */
      Poll(pfds, pfdslen, -1);

      /* Deal with the server fd first */
      if (pfds[0].revents & POLLERR)
        CEREBROD_DBG(("server_fd POLLERR"));
      else if (pfds[0].revents & POLLIN)
        {
          /* NOTE(review): POSIX accept() takes socklen_t* for the
           * address length; unsigned int matches on common platforms
           * but socklen_t would be the portable type — confirm. */
          unsigned int client_addr_len;
          int fd;
          struct sockaddr_in client_addr;

          client_addr_len = sizeof(struct sockaddr_in);
          if ((fd = accept(server_fd,
                           (struct sockaddr *)&client_addr,
                           &client_addr_len)) < 0)
            server_fd = cerebrod_reinit_socket(server_fd,
                                               0,
                                               _event_server_setup_socket,
                                               "event_server: accept");
          if (fd >= 0)
            _event_server_service_connection(fd);
        }

      /* Deal with the connecting fds */
      for (i = 1; i < pfdslen; i++)
        {
          if (pfds[i].revents & POLLERR)
            {
              CEREBROD_DBG(("fd = %d POLLERR", pfds[i].fd));
              Pthread_mutex_lock(&event_connections_lock);
              _delete_event_connection_fd(pfds[i].fd);
              Pthread_mutex_unlock(&event_connections_lock);
              continue;
            }

          if (pfds[i].revents & POLLIN)
            {
              char buf[CEREBRO_MAX_PACKET_LEN];
              int n;

              /* We should not expect any actual data.  If
               * we get some, just eat it and move on.
               *
               * The common situation is that the client
               * closes the connection.  So we need to delete
               * our fd.
               */
              n = fd_read_n(pfds[i].fd, buf, CEREBRO_MAX_PACKET_LEN);
              if (n < 0)
                CEREBROD_DBG(("fd_read_n = %s", strerror(errno)));

              if (n <= 0)
                {
                  if (conf.debug && conf.event_server_debug)
                    {
                      Pthread_mutex_lock(&debug_output_mutex);
                      fprintf(stderr, "**************************************\n");
                      fprintf(stderr, "* Event Server Close Fd: %d\n", pfds[i].fd);
                      fprintf(stderr, "**************************************\n");
                      Pthread_mutex_unlock(&debug_output_mutex);
                    }
                  Pthread_mutex_lock(&event_connections_lock);
                  _delete_event_connection_fd(pfds[i].fd);
                  Pthread_mutex_unlock(&event_connections_lock);
                }
            }
        }

      Free(pfds);
    }

  return NULL;			/* NOT REACHED */
}