/* Persist every history mode to disk, then tear down and free the mode
 * table.  Safe to call when the table was never created (or already
 * destroyed): mode_table is reset to 0 so a second call is a no-op. */
void history_destroy()
{
    if( !mode_table )
        return;

    /* First pass saves each mode, second pass destroys it. */
    hash_foreach( mode_table, (void (*)(void *, void *))&history_save_mode );
    hash_foreach( mode_table, (void (*)(void *, void *))&history_destroy_mode_wrapper );

    hash_destroy( mode_table );
    free( mode_table );
    mode_table = 0;
}
/*
 * hash_foreach callback over the root authority table: folds one source's
 * metric summary into the global root summary.
 *   key - source name datum (key->data is the NUL-terminated name)
 *   val - datum whose data is a Source_t
 *   arg - forwarded unchanged to sum_metrics
 * Returns the rc of hash_foreach over the source's metric_summary, or 0
 * when the source is dead or its metric is listed as unsummarized.
 * Holds source->sum_finished for the whole read so a concurrent summary
 * update cannot interleave with the accumulation.
 */
/* Sums the metric summaries from all data sources. */ static int do_root_summary( datum_t *key, datum_t *val, void *arg ) { Source_t *source = (Source_t*) val->data; int rc; llist_entry *le; /* We skip dead sources. */ if (source->ds->dead) return 0; /* We skip metrics not to be summarized. */ if (llist_search(&(gmetad_config.unsummarized_metrics), (void *)key->data, llist_strncmp, &le) == 0) return 0; /* Need to be sure the source has a complete sum for its metrics. */ pthread_mutex_lock(source->sum_finished); /* We know that all these metrics are numeric. */ rc = hash_foreach(source->metric_summary, sum_metrics, arg); /* Update the top level root source */ root.hosts_up += source->hosts_up; root.hosts_down += source->hosts_down; /* summary completed for source */ pthread_mutex_unlock(source->sum_finished); return rc; }
/* Exercise the hash table: add, lookup, delete (by bucket and by key),
 * re-add after deletion, then walk the table.
 * Fix: removed the unused locals `int b = 119;` and `Bucket *b2` that
 * were declared but never read. */
int main(void)
{
    HashTable ht;
    hash_init(&ht, 2);          /* deliberately small: forces growth/collisions */

    int a = 118;
    hash_add(&ht, "订单2355", &a);
    hash_add(&ht, "订单2399", &a);
    hash_add(&ht, "订单2388", &a);
    hash_add(&ht, "订单2333", &a);

    printf("哈希表大小为%d\n", ht.nTableSize);
    printf("哈希表已使用元素个数为%d\n", ht.nNumUsed);

    /* Delete one entry via its bucket, one via its key. */
    Bucket *find_bucket;
    find_bucket = hash_get(&ht, "订单2333");
    hash_del(&ht, find_bucket);
    hash_del_by_key(&ht, "订单2388");

    /* Insert again to reuse the freed slots. */
    hash_add(&ht, "订单9999", &a);
    hash_add(&ht, "订单8888", &a);
    hash_add(&ht, "订单7777", &a);

    hash_foreach(&ht);
    return 0;
}
/* Print information about every entry in the node table.  The table mutex
 * is taken unless we are crash dumping (no locking during a crash dump).
 * On return, *no_sysname and *no_total receive the counters accumulated by
 * print_node, when the caller supplied non-NULL pointers. */
void erts_print_node_info(int to, void *to_arg, Eterm sysname, int *no_sysname, int *no_total)
{
    struct pn_data pnd;
    int take_lock = !ERTS_IS_CRASH_DUMPING;

    pnd.to = to;
    pnd.to_arg = to_arg;
    pnd.sysname = sysname;
    pnd.no_sysname = 0;
    pnd.no_total = 0;

    if (take_lock)
        erts_smp_mtx_lock(&erts_node_table_mtx);

    hash_foreach(&erts_node_table, print_node, (void *) &pnd);
    if (pnd.no_sysname != 0)
        erts_print(to, to_arg, "\n");

    if (take_lock)
        erts_smp_mtx_unlock(&erts_node_table_mtx);

    if (no_sysname)
        *no_sysname = pnd.no_sysname;
    if (no_total)
        *no_total = pnd.no_total;
}
/*
 * hash_foreach callback: folds one data source's metric summary into the
 * global root summary (this variant has no unsummarized-metrics filter).
 *   key - source name datum (unused here)
 *   val - datum whose data is a Source_t
 *   arg - forwarded unchanged to sum_metrics
 * Returns the rc of hash_foreach over the source's metric_summary, or 0
 * for a dead source.  source->sum_finished is held for the whole read so
 * the summary cannot change mid-accumulation.
 */
/* Sums the metric summaries from all data sources. */ static int do_root_summary( datum_t *key, datum_t *val, void *arg ) { Source_t *source = (Source_t*) val->data; int rc; /* We skip dead sources. */ if (source->ds->dead) return 0; /* Need to be sure the source has a complete sum for its metrics. */ pthread_mutex_lock(source->sum_finished); /* We know that all these metrics are numeric. */ rc = hash_foreach(source->metric_summary, sum_metrics, arg); /* Update the top level root source */ root.hosts_up += source->hosts_up; root.hosts_down += source->hosts_down; /* summary completed for source */ pthread_mutex_unlock(source->sum_finished); return rc; }
void dump_channels(void) { char path[_POSIX_PATH_MAX], tmppath[_POSIX_PATH_MAX]; int fd; struct stat sts; snprintf(tmppath, sizeof(tmppath), "%s/channels.tmp", global.varDir); if((fd = open(tmppath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR ))==-1) { logerr("dump_channels", tmppath); return; } fake_fputs(":version 1", fd); fake_fputs(LE, fd); hash_foreach(global.channelHash, (hash_callback_t) dump_channel_cb, (void *) fd); if(close(fd)) { logerr("dump_channels", "close"); } snprintf(path, sizeof(path), "%s/channels", global.varDir); if(stat(path, &sts) == -1 && errno == ENOENT) { log_message_level(LOG_LEVEL_DEBUG, "%s file does not exist\n", path); } else { if(unlink(path)) logerr("dump_channels", "unlink"); /* not fatal, may not exist */ } if(rename(tmppath, path)) { logerr("dump_channels", "rename"); return; } }
/* Release everything held by an IpConnTrack and zero it out.
 * Per-entry cleanup runs first (via the foreach callback), then the
 * containers themselves are cleared.  Always returns 0. */
int IpConnTrack_clear(IpConnTrack *self)
{
    /* Free per-connection state before dropping the hash buckets. */
    hash_foreach(&self->m_conn_hash, IpConnTrack_foreach_clear, 0);
    hash_clear(&self->m_conn_hash);

    array_clear(&self->m_local_addrs);

    /* Leave the object in a known-empty state. */
    memset(self, 0, sizeof(*self));
    return 0;
}
/* Print a hash as "<label> <...>" to fout.  The helper receives the
 * destination stream through the file-scope static_hplh_file, which is
 * set for the duration of the walk and cleared afterwards.
 * NOTE(review): the static channel makes this non-reentrant — confirm
 * callers never print concurrently. */
void hash_fprint(FILE *fout, ct_hash *hash, char *label)
{
    fprintf(fout, "%s <", label);

    static_hplh_file = fout;
    hash_foreach(hash, &hash_fprint_helper);
    static_hplh_file = NULL;

    fprintf(fout, ">\n");
}
/* Build a new hash containing the intersection of a and b.  The walk over
 * a consults b through the file-scope statics that the helper reads.
 * The result is sized to the larger input's capacity.  Caller owns the
 * returned hash. */
ct_hash *hash_intersection(ct_hash *a, ct_hash *b)
{
    size_t cap = (a->size > b->size) ? a->size : b->size;
    ct_hash *result = hash_new(cap);

    /* Helper channel: which table to probe and where to insert hits. */
    static_hash_intersect_other = b;
    static_hash_intersection = result;
    hash_foreach(a, &hash_intersection_helper);

    return result;
}
/* Dot product of two hashes (sum over shared keys of the value products,
 * as computed by hash_dot_helper).  The accumulator and the second
 * operand are passed to the helper through file-scope globals, so this
 * is not reentrant. */
double hash_dot(ct_hash *a, ct_hash *b)
{
    hash_dot_other = b;
    hash_dot_sum = 0.0;

    hash_foreach(a, &hash_dot_helper);

    return hash_dot_sum;
}
static void _appbroker_constants(AppBroker * appbroker) { Hash * hash; if((hash = hash_get(appbroker->config, "constants")) == NULL) return; fputs("\n\n/* constants */\n", appbroker->fp); hash_foreach(hash, (HashForeach)_appbroker_foreach_constant, appbroker); }
/* Print a hash as "<label> <...>" to fout, mapping ids to words through
 * wordmap.  The helper receives its context through the file-scope
 * statics static_hplh_file / static_hash_print_wordmap.
 * Fix: static_hplh_file is now reset to NULL after the walk, matching
 * hash_fprint — previously only the wordmap static was cleared, leaving
 * a dangling FILE* in the global. */
void hash_fprint_labeled(FILE *fout, ct_hash *hash, char *label, WordMap *wordmap)
{
    fprintf(fout, "%s <", label);

    static_hplh_file = fout;
    static_hash_print_wordmap = wordmap;
    hash_foreach(hash, &hash_print_labeled_helper);
    static_hash_print_wordmap = NULL;
    static_hplh_file = NULL;

    fprintf(fout, ">\n");
}
/* Compute the PPMI table for one target of the model.  The target's total
 * count and the output table are handed to CW_ppmi_helper through the
 * file-scope statics; the walk fills target_ppmi in place.  Caller owns
 * the returned double_hash. */
double_hash *CW_ppmi(CW *model, int index)
{
    double_hash *out = double_hash_new(10);

    /* Helper context: normalizing sum and destination table. */
    CW_static_f_ix = model->targets[index]->sum;
    static_ppmi = out;

    hash_foreach(model->targets[index], &CW_ppmi_helper);
    return out;
}
/* Tear down a libconf handle: config-file names, the three option hashes
 * (entries freed via libconf_fini_helper, then the tables), the saved
 * argv copy, the defaults, and finally the handle itself. */
void libconf_fini(libconf_t * handle)
{
    int i;

    free(handle->global_config_filename);
    free(handle->local_config_filename);

    /* Each table: free the entries, then the table. */
    hash_foreach(handle->options, libconf_fini_helper, NULL);
    hash_free(handle->options);

    hash_foreach(handle->option_hash, libconf_fini_helper, NULL);
    hash_free(handle->option_hash);

    hash_foreach(handle->tmp_hash, libconf_fini_helper, NULL);
    hash_free(handle->tmp_hash);

    for (i = 0; i < handle->argc; i++)
        free(handle->argv[i]);
    free(handle->argv);

    if (handle->defaults)
        libconf_optparams_free(handle->defaults);

    free(handle);
}
/* Verify hash contents against the elts array: every elt flagged in_hash
 * must be found by hash_get (with the elt itself as the value), nothing
 * else may be present, and a full hash_foreach walk must visit every
 * in_hash elt.  Prints "ok" plus stats on success; returns non-zero on
 * any mismatch.
 * Fix: the "elt missed" message used to print node->node_key/node_val,
 * where `node` is stale state from the previous loop and can even be
 * NULL (dereference crash).  It now reports the missed elt itself. */
int verify(hash_t *hash, elt *elts, int nelts, char *msg)
{
    int i, err;
    hash_node_t *node = 0, tmp;
    elt *p;

    memset(&tmp, 0, sizeof(tmp));
    printf("%s: ", msg);

    err = 0;
    for(i = 0, p = elts; i < nelts; i++, p++) {
        p->got_foreach = 0;
        node = hash_get(hash, p);
        if( !p->in_hash && node ) {
            printf("error: p found in hash"
                   " at %d: p=%p, in_hash=%d, key=%p val=%p",
                   i, p, p->in_hash, node->node_key, node->node_val);
            err = 1;
        }
        if( p->in_hash && (!node || node->node_val != p) ) {
            node = &tmp;
            err = 1;
            printf("error: p not found in hash"
                   " at %d: p=%p, in_hash=%d, key=%p val=%p\n",
                   i, p, p->in_hash, node->node_key, node->node_val);
        }
    }

    /* Second check: the foreach walk must hit every stored elt. */
    hash_foreach(hash, verify_foreach, elts);
    for(i = 0, p = elts; i < nelts; i++, p++) {
        if( p->in_hash && !p->got_foreach ) {
            printf("error in foreach: elt missed"
                   " at i=%d, p=%p, in_hash=%d, got_foreach=%d\n",
                   i, (void *)p, p->in_hash, p->got_foreach);
            err = 1;
        }
    }

    if( !err ) {
        hash_stat_t st;
        i = hash_stat(hash, &st);
        printf("ok\n count=%d buckets=%d used=%d(%0.1f%%) max=%d avg=%.1f",
               hash_count(hash), st.count, st.used,
               (float)100.0 * st.used / st.count, st.max, st.avg);
    }
    printf("\n");
    return err;
}
/*
 * hash_foreach callback: folds one data source's metric summary into the
 * global root summary.  When an explicit summarized_metrics list is
 * configured, only those metrics are summed; otherwise everything except
 * the unsummarized_metrics list is summed.
 *   key - source name datum; val - datum holding a Source_t;
 *   arg - forwarded unchanged to sum_metrics.
 * Holds source->sum_finished for the whole read.
 * Fix: the summarized_metrics branch now passes `arg` to sum_metrics,
 * matching the hash_foreach path below (it previously hard-coded NULL).
 */
static int do_root_summary( datum_t *key, datum_t *val, void *arg )
{
   Source_t *source = (Source_t*) val->data;
   int rc;
   llist_entry *le;

   /* We skip dead sources. */
   if (source->ds->dead)
      return 0;

   /* Need to be sure the source has a complete sum for its metrics. */
   pthread_mutex_lock(source->sum_finished);

   if (gmetad_config.summarized_metrics != NULL)
      {
         /* Explicit allow-list: look each metric up directly. */
         for (le = gmetad_config.summarized_metrics; le != NULL; le = le->next)
            {
               datum_t skey, *r;
               skey.data = le->val;
               skey.size = strlen(le->val) + 1;
               r = hash_lookup(&skey, source->metric_summary);
               if (r != NULL)
                  {
                     sum_metrics(&skey, r, arg);
                     datum_free(r);
                  }
            }
         rc = 0;
      }
   else
      {
         /* We skip metrics not to be summarized. */
         if (llist_search(&(gmetad_config.unsummarized_metrics),
                          (void *)key->data, llist_strncmp, &le) == 0)
            {
               rc = 0;
               goto out;
            }
         /* We know that all these metrics are numeric. */
         rc = hash_foreach(source->metric_summary, sum_metrics, arg);
      }

   /* Update the top level root source */
   root.hosts_up += source->hosts_up;
   root.hosts_down += source->hosts_down;

out:
   /* summary completed for source */
   pthread_mutex_unlock(source->sum_finished);
   return rc;
}
/*
 * expat end-handler for </GRID>.  Decrements the grid depth; when we are
 * the authority for this grid, swaps the pending and live metric-summary
 * hashes under sum_finished, re-inserts the Source_t into the root hash
 * (trimmed to its used string length), and writes the now-retired summary
 * out via finish_processing_source.  Returns 0 on success, 1 on insert
 * failure.  In non-scalable mode GRID elements are ignored entirely.
 * NOTE(review): the matching sum_finished lock lifetime spans the start/
 * end element handlers — confirm against startElement_GRID before
 * restructuring.
 */
static int endElement_GRID(void *data, const char *el) { xmldata_t *xmldata = (xmldata_t *) data; /* In non-scalable mode, we ignore GRIDs. */ if (!gmetad_config.scalable_mode) return 0; xmldata->grid_depth--; debug_msg("Found a </GRID>, depth is now %d", xmldata->grid_depth); datum_t hashkey, hashval; datum_t *rdatum; hash_t *summary; Source_t *source; /* Only keep info on sources we are an authority on. */ if (authority_mode(xmldata)) { source = &xmldata->source; /* Swap the metric_summary and metric_summary_pending over */ pthread_mutex_lock(source->sum_finished); { summary = xmldata->source.metric_summary_pending; xmldata->source.metric_summary_pending = xmldata->source.metric_summary; xmldata->source.metric_summary = summary; } pthread_mutex_unlock(source->sum_finished); hashkey.data = (void*) xmldata->sourcename; hashkey.size = strlen(xmldata->sourcename) + 1; hashval.data = source; /* Trim structure to the correct length. */ hashval.size = sizeof(*source) - GMETAD_FRAMESIZE + source->stringslen; /* We insert here to get an accurate hosts up/down value. */ rdatum = hash_insert( &hashkey, &hashval, xmldata->root); if (!rdatum) { err_msg("Could not insert source %s", xmldata->sourcename); return 1; } /* Write the metric summaries to the RRD. */ hash_foreach(summary, finish_processing_source, data); } return 0; }
/* Emit one source's summary to the client: a <HOSTS .../> line with the
 * up/down counts, then every entry of metric_summary via metric_summary().
 * The summary hash is read under sum_finished so a concurrent update
 * cannot interleave.  Returns non-zero on write failure. */
static int source_summary(Source_t *source, client_t *client)
{
    int rc = xml_print(client, "<HOSTS UP=\"%u\" DOWN=\"%u\" SOURCE=\"gmetad\"/>\n",
                       source->hosts_up, source->hosts_down);
    if (rc)
        return 1;

    pthread_mutex_lock(source->sum_finished);
    rc = hash_foreach(source->metric_summary, metric_summary, (void*) client);
    pthread_mutex_unlock(source->sum_finished);

    return rc;
}
int main(int a, char **v) { struct hash _calinks, *calinks = &_calinks; const char* bundle = "bundleXXXXXX"; char* tmpfile = 0; if (asprintf(&tmpfile, "%s%s", ETCCERTSDIR, bundle) == -1) return 1; int fd = mkstemp(tmpfile); if (fd == -1) { fprintf(stderr, "Failed to open temporary file %s for ca bundle\n", tmpfile); return 1; } fchmod(fd, 0644); hash_init(calinks); /* Handle global CA certs from config file */ read_global_ca_list(CERTSCONF, calinks, fd); /* Handle local CA certificates */ dir_readfiles(calinks, LOCALCERTSDIR, FILE_REGULAR, &proc_localglobaldir, fd); /* Update etc cert dir for additions and deletions*/ dir_readfiles(calinks, ETCCERTSDIR, FILE_LINK, &proc_etccertsdir, fd); hash_foreach(calinks, update_ca_symlink); /* Update hashes and the bundle */ if (fd != -1) { close(fd); char* newcertname = 0; if (asprintf(&newcertname, "%s%s", ETCCERTSDIR, CERTBUNDLE) != -1) { rename(tmpfile, newcertname); free(newcertname); } } free(tmpfile); /* Execute run-parts */ static const char *run_parts_args[] = { "run-parts", RUNPARTSDIR, 0 }; execve("/usr/bin/run-parts", run_parts_args, NULL); execve("/bin/run-parts", run_parts_args, NULL); perror("run-parts"); return 1; }
static int mx_do_bgsave_queue() { char tbuf[2048]; /* database filename */ sprintf(tbuf, "%s.%d", mx_global->bgsave_filepath, getpid()); mx_dbfp = fopen(tbuf, "wb"); if (!mx_dbfp) { mx_write_log(mx_log_error, "failed to open bgsave tempfile"); return -1; } if (fwrite(MX_BGSAVE_HEADER, sizeof(MX_BGSAVE_HEADER) - 1, 1, mx_dbfp) != 1) { goto failed; } /* save ready queues */ if (hash_foreach(mx_global->queue_table, mx_save_ready_queue) != 0 || mx_save_delay_queue() != 0 || mx_save_recycle_queue() != 0) { goto failed; } /* save delay queue */ if (fwrite(&mx_null_header, sizeof(mx_null_header), 1, mx_dbfp) != 1) { goto failed; } fflush(mx_dbfp); fsync(fileno(mx_dbfp)); fclose(mx_dbfp); mx_dbfp = NULL; if (rename(tbuf, mx_global->bgsave_filepath) == -1) { mx_write_log(mx_log_error, "failed to rename tempfile, message(%s)", strerror(errno)); unlink(tbuf); return -1; } return 0; failed: mx_write_log(mx_log_error, "failed to write data to bgsave tempfile, message(%s)", strerror(errno)); fclose(mx_dbfp); mx_dbfp = NULL; return -1; }
/* Duplicate a hash table: create a table of the same slot count, carry
 * over all the configured callbacks (key/value copiers, destructors,
 * comparator), then copy every entry via _hash_dup_foreach.  Returns the
 * copy, or NULL when creation fails.  Caller owns the result. */
hash_t *hash_dup(hash_t *ht)
{
    hash_t *copy = hash_create(ht->slots);

    if (copy == NULL)
        return NULL;

    /* Mirror the source table's configuration. */
    HASH_SET_KEYCPY(copy, ht->keycpy);
    HASH_SET_VALCPY(copy, ht->valcpy);
    HASH_SET_FREE_KEY(copy, ht->free_key);
    HASH_SET_FREE_VAL(copy, ht->free_val);
    HASH_SET_KEYCMP(copy, ht->keycmp);

    /* Copy every entry into the new table. */
    hash_foreach(ht, _hash_dup_foreach, copy);
    return copy;
}
/* Sort `list` (length list_len) in version order: pre-parse each string
 * into a token list stored in the global hash `ht`, find the longest
 * token string (foreach_longest_string_callback fills
 * longest_string_len), then qsort with compare_by_version.
 * Fix: loop index is now size_t — `int i < size_t list_len` was a
 * signed/unsigned comparison that breaks for very large lists. */
void version_sorter_sort(char **list, size_t list_len)
{
    size_t i;
    StringLinkedList *sll;

    ht = hash_init(list_len);
    for (i = 0; i < list_len; i++) {
        sll = string_linked_list_init();
        parse_version_word(list[i], sll);
        hash_put(ht, list[i], (void *) sll);
    }

    longest_string_len = 0;
    hash_foreach(ht, &foreach_longest_string_callback, NULL);

    qsort((void *) list, list_len, sizeof(char *), &compare_by_version);
}
/*
 * expat end-handler for </CLUSTER>.  When we are the authority for this
 * cluster: releases the sum_finished mutex that the matching
 * startElement_CLUSTER acquired (the summary is complete at this point),
 * re-inserts the Source_t into the root hash trimmed to its used string
 * length, and writes the metric summaries out via
 * finish_processing_source.  Returns 0 on success, 1 on insert failure.
 * NOTE(review): lock acquired in start handler, released here — the two
 * handlers must stay paired.
 */
static int endElement_CLUSTER(void *data, const char *el) { xmldata_t *xmldata = (xmldata_t *) data; datum_t hashkey, hashval; datum_t *rdatum; hash_t *summary; Source_t *source; /* Only keep info on sources we are an authority on. */ if (authority_mode(xmldata)) { source = &xmldata->source; summary = xmldata->source.metric_summary; /* Release the partial sum mutex */ pthread_mutex_unlock(source->sum_finished); hashkey.data = (void*) xmldata->sourcename; hashkey.size = strlen(xmldata->sourcename) + 1; hashval.data = source; /* Trim structure to the correct length. */ hashval.size = sizeof(*source) - GMETAD_FRAMESIZE + source->stringslen; /* We insert here to get an accurate hosts up/down value. */ rdatum = hash_insert( &hashkey, &hashval, xmldata->root); if (!rdatum) { err_msg("Could not insert source %s", xmldata->sourcename); return 1; } /* Write the metric summaries to the RRD. */ hash_foreach(summary, finish_processing_source, data); } return 0; }
/*
 * Set-style operation between two hash tables, driven by `op`:
 *   op > 0  - add entries from `from` into `to` only where absent,
 *   op < 0  - overwrite matching entries in `to` with those from `from`,
 *   op == 0 - remove from `to` every entry matching `from`.
 * When `to` is NULL a new table is created with `from`'s configuration.
 * Returns `to` (possibly newly created); returns `to` unchanged when
 * `from` is NULL.
 * Fix: the iterator was invoked as `©_iterator` — a mis-encoded
 * `&copy_iterator` (HTML entity corruption) that does not compile.
 *
 * ATTENTION!! VERY dangerous!! cpy_swallow is substituted when
 * from->new_tag or from->new_cell is NULL; with a nontrivial destructor
 * this can lead to free() being applied twice to the same pointer.
 */
HASH_TABLE binary_operations_under_hash_table(HASH_TABLE from, HASH_TABLE to, int op)
{
    COPY_ITERATOR_PARAMETERS iterator_params;

    if (from == NULL)
        return to;

    if (to == NULL)
        to = create_hash_table_with_copy(from->tablesize, from->hash, from->cmp,
                                         from->destructor, from->new_tag,
                                         from->new_cell);

    /* Fall back to shallow copy when no copier was configured. */
    if (from->new_tag != NULL)
        iterator_params.copy_tag = from->new_tag;
    else
        iterator_params.copy_tag = &cpy_swallow;

    if (from->new_cell != NULL)
        iterator_params.copy_cell = from->new_cell;
    else
        iterator_params.copy_cell = &cpy_swallow;

    iterator_params.to_table = to;
    iterator_params.from_to_priors = op;

    hash_foreach(from, &iterator_params, &copy_iterator);
    return to;
}/*binary_operations_under_hash_table*/
/*
 * sacerdoti: An Object Oriented design in C.  We use function pointers to
 * approximate virtual method functions.  A recursive-descent design.
 *
 * hash_foreach callback that reports one tree node (and, recursively, its
 * children) to the client.
 *   key - node name datum; val - datum holding a Generic_t; arg - client_t*.
 * Returns 1 to stop the walk early (filter mismatch or a failed
 * report_start), otherwise the node's report_end result.
 * NOTE(review): the hash_foreach over children discards its return value,
 * so a child's early stop does not propagate upward — confirm intended.
 */
/* sacerdoti: An Object Oriented design in C. * We use function pointers to approximate virtual method functions. * A recursive-descent design. */ static int tree_report(datum_t *key, datum_t *val, void *arg) { client_t *client = (client_t*) arg; Generic_t *node = (Generic_t*) val->data; int rc=0; if (client->filter && !applicable(client->filter, node)) return 1; rc = node->report_start(node, key, client, NULL); if (rc) return 1; applyfilter(client, node); if (node->children) { /* Allow this to stop early (return code = 1) */ hash_foreach(node->children, tree_report, arg); } rc = node->report_end(node, client, NULL); return rc; }
/*
 * gmetad entry point.  Sequence visible below:
 *   1. APR init, memory pool, SIGPIPE ignored, scoreboard, start time.
 *   2. Parse command line and config; create the sources / root authority
 *      / root metric-summary hashes sized from the data-source count.
 *   3. Default authority URL from the hostname when the config set none;
 *      seed the RNG from time, thread id and the authority string.
 *   4. Daemonize (unless debugging), write pidfile, optionally setuid,
 *      and sanity-check rrd_rootdir ownership/write permission.
 *   5. Open XML and interactive TCP server sockets; optional carbon and
 *      riemann UDP forwarding sockets; init the root summary mutex.
 *   6. Spawn detached server threads (half as many non-interactive as
 *      interactive), one data thread per source, and a cleanup thread.
 *   7. Forever: sleep a randomized fraction of shortest_step (at least
 *      METADATA_MINIMUM_SLEEP), then under root.sum_finished zero the
 *      summary, re-sum via do_root_summary, and write RRDs via
 *      write_root_summary.
 * The apr_pool_destroy/apr_terminate calls after the loop are unreachable.
 */
int main ( int argc, char *argv[] ) { int rc; struct stat struct_stat; pthread_t pid; pthread_attr_t attr; int i, num_sources; uid_t gmetad_uid; mode_t rrd_umask; char * gmetad_username; struct passwd *pw; gmetad_config_t *c = &gmetad_config; apr_interval_time_t sleep_time; apr_time_t last_metadata; double random_sleep_factor; unsigned int rand_seed; rc = apr_initialize(); if (rc != APR_SUCCESS) { return -1; } /* create a memory pool. */ apr_pool_create(&global_context, NULL); /* Ignore SIGPIPE */ signal( SIGPIPE, SIG_IGN ); initialize_scoreboard(); /* Mark the time this gmetad started */ started = apr_time_now(); if (cmdline_parser(argc, argv, &args_info) != 0) err_quit("command-line parser error"); num_sources = number_of_datasources( args_info.conf_arg ); if(!num_sources) { err_quit("%s doesn't have any data sources specified", args_info.conf_arg); } memset(&root, 0, sizeof(root)); root.id = ROOT_NODE; /* Get the real number of data sources later */ sources = hash_create( num_sources + 10 ); if (! sources ) { err_quit("Unable to create sources hash\n"); } root.authority = hash_create( num_sources + 10 ); if (!root.authority) { err_quit("Unable to create root authority (our grids and clusters) hash\n"); } root.metric_summary = hash_create (DEFAULT_METRICSIZE); if (!root.metric_summary) { err_quit("Unable to create root summary hash"); } parse_config_file ( args_info.conf_arg ); /* If given, use command line directives over config file ones. */ if (args_info.debug_given) { c->debug_level = args_info.debug_arg; } debug_level = c->debug_level; set_debug_msg_level(debug_level); /* Setup our default authority pointer if the conf file hasnt yet. * Done in the style of hash node strings. 
 */ if (!root.stringslen) { gethostname(hostname, HOSTNAMESZ); root.authority_ptr = 0; sprintf(root.strings, "http://%s/ganglia/", hostname); root.stringslen += strlen(root.strings) + 1; } rand_seed = apr_time_now() * (int)pthread_self(); for(i = 0; i < root.stringslen; rand_seed = rand_seed * root.strings[i++]); /* Debug level 1 is error output only, and no daemonizing. */ if (!debug_level) { rrd_umask = c->umask; daemon_init (argv[0], 0, rrd_umask); } if (args_info.pid_file_given) { update_pidfile (args_info.pid_file_arg); } /* The rrd_rootdir must be writable by the gmetad process */ if( c->should_setuid ) { if(! (pw = getpwnam(c->setuid_username))) { err_sys("Getpwnam error"); } gmetad_uid = pw->pw_uid; gmetad_username = c->setuid_username; } else { gmetad_uid = getuid(); if(! (pw = getpwuid(gmetad_uid))) { err_sys("Getpwnam error"); } gmetad_username = strdup(pw->pw_name); } debug_msg("Going to run as user %s", gmetad_username); if( c->should_setuid ) { become_a_nobody(c->setuid_username); } if( c->write_rrds ) { if( stat( c->rrd_rootdir, &struct_stat ) ) { err_sys("Please make sure that %s exists", c->rrd_rootdir); } if ( struct_stat.st_uid != gmetad_uid ) { err_quit("Please make sure that %s is owned by %s", c->rrd_rootdir, gmetad_username); } if (! 
(struct_stat.st_mode & S_IWUSR) ) { err_quit("Please make sure %s has WRITE permission for %s", gmetad_username, c->rrd_rootdir); } } if(debug_level) { fprintf(stderr,"Sources are ...\n"); hash_foreach( sources, print_sources, NULL); } #ifdef WITH_MEMCACHED if (c->memcached_parameters != NULL) { memcached_connection_pool = memcached_pool(c->memcached_parameters, strlen(c->memcached_parameters)); } #endif /* WITH_MEMCACHED */ server_socket = g_tcp_socket_server_new( c->xml_port ); if (server_socket == NULL) { err_quit("tcp_listen() on xml_port failed"); } debug_msg("xml listening on port %d", c->xml_port); interactive_socket = g_tcp_socket_server_new( c->interactive_port ); if (interactive_socket == NULL) { err_quit("tcp_listen() on interactive_port failed"); } debug_msg("interactive xml listening on port %d", c->interactive_port); /* Forward metrics to Graphite using carbon protocol */ if (c->carbon_server != NULL) { if (!strcmp(c->carbon_protocol, "udp")) { carbon_udp_socket = init_carbon_udp_socket (c->carbon_server, c->carbon_port); if (carbon_udp_socket == NULL) err_quit("carbon %s socket failed for %s:%d", c->carbon_protocol, c->carbon_server, c->carbon_port); } debug_msg("carbon forwarding ready to send via %s to %s:%d", c->carbon_protocol, c->carbon_server, c->carbon_port); } #ifdef WITH_RIEMANN if (c->riemann_server !=NULL) { if (!strcmp(c->riemann_protocol, "udp")) { riemann_udp_socket = init_riemann_udp_socket (c->riemann_server, c->riemann_port); if (riemann_udp_socket == NULL) err_quit("[riemann] %s socket failed for %s:%d", c->riemann_protocol, c->riemann_server, c->riemann_port); } else { err_quit("[riemann] TCP transport not supported yet."); } debug_msg("[riemann] ready to forward metrics via %s to %s:%d", c->riemann_protocol, c->riemann_server, c->riemann_port); } #endif /* WITH_RIEMANN */ /* initialize summary mutex */ root.sum_finished = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(root.sum_finished, NULL); 
pthread_attr_init( &attr ); pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED ); /* Spin off the non-interactive server threads. (Half as many as interactive). */ for (i=0; i < c->server_threads/2; i++) pthread_create(&pid, &attr, server_thread, (void*) 0); /* Spin off the interactive server threads. */ for (i=0; i < c->server_threads; i++) pthread_create(&pid, &attr, server_thread, (void*) 1); hash_foreach( sources, spin_off_the_data_threads, NULL ); /* A thread to cleanup old metrics and hosts */ pthread_create(&pid, &attr, cleanup_thread, (void *) NULL); debug_msg("cleanup thread has been started"); /* Meta data */ last_metadata = 0; for(;;) { /* Do at a random interval, between (shortest_step/2) +/- METADATA_SLEEP_RANDOMIZE percent */ random_sleep_factor = (1 + (METADATA_SLEEP_RANDOMIZE / 50.0) * ((rand_r(&rand_seed) - RAND_MAX/2)/(float)RAND_MAX)); sleep_time = random_sleep_factor * apr_time_from_sec(c->shortest_step) / 2; /* Make sure the sleep time is at least 1 second */ if(apr_time_sec(apr_time_now() + sleep_time) < (METADATA_MINIMUM_SLEEP + apr_time_sec(apr_time_now()))) sleep_time += apr_time_from_sec(METADATA_MINIMUM_SLEEP); apr_sleep(sleep_time); /* Need to be sure root is locked while doing summary */ pthread_mutex_lock(root.sum_finished); /* Flush the old values */ hash_foreach(root.metric_summary, zero_out_summary, NULL); root.hosts_up = 0; root.hosts_down = 0; /* Sum the new values */ hash_foreach(root.authority, do_root_summary, NULL ); /* summary completed */ pthread_mutex_unlock(root.sum_finished); /* Save them to RRD */ hash_foreach(root.metric_summary, write_root_summary, NULL); /* Remember our last run */ last_metadata = apr_time_now(); } apr_pool_destroy(global_context); apr_terminate(); return 0; }
/*
 * expat start-handler for <CLUSTER>.  When we are the authority: looks the
 * cluster up in the root hash by NAME; a new cluster gets a fresh Source_t
 * (authority + summary hashes, new sum_finished mutex), an existing one is
 * copied out of the hash and its summary is zeroed.  In BOTH paths the
 * sum_finished mutex is LOCKED on return — the matching
 * endElement_CLUSTER releases it.  Remaining attributes (OWNER, LATLONG,
 * URL, LOCALTIME) are packed into the source's string area via addstring;
 * `edge` tracks the used length, stored in stringslen.
 * Returns 0 on success, 1 when a hash cannot be created.
 */
static int startElement_CLUSTER(void *data, const char *el, const char **attr) { xmldata_t *xmldata = (xmldata_t *)data; struct xml_tag *xt; datum_t *hash_datum = NULL; datum_t hashkey; const char *name = NULL; int edge; int i; Source_t *source; /* Get name for hash key */ for(i = 0; attr[i]; i+=2) { xt = in_xml_list (attr[i], strlen(attr[i])); if (!xt) continue; if (xt->tag == NAME_TAG) name = attr[i+1]; } /* Only keep cluster details if we are the authority on this cluster. */ if (!authority_mode(xmldata)) return 0; source = &(xmldata->source); xmldata->sourcename = realloc(xmldata->sourcename, strlen(name)+1); strcpy(xmldata->sourcename, name); hashkey.data = (void*) name; hashkey.size = strlen(name) + 1; hash_datum = hash_lookup(&hashkey, xmldata->root); if (!hash_datum) { memset((void*) source, 0, sizeof(*source)); /* Set the identity of this host. */ source->id = CLUSTER_NODE; source->report_start = source_report_start; source->report_end = source_report_end; source->authority = hash_create(DEFAULT_CLUSTERSIZE); if (!source->authority) { err_msg("Could not create hash table for cluster %s", name); return 1; } if(gmetad_config.case_sensitive_hostnames == 0) hash_set_flags(source->authority, HASH_FLAG_IGNORE_CASE); source->metric_summary = hash_create(DEFAULT_METRICSIZE); if (!source->metric_summary) { err_msg("Could not create summary hash for cluster %s", name); return 1; } source->ds = xmldata->ds; /* Initialize the partial sum lock */ source->sum_finished = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(source->sum_finished, NULL); /* Grab the "partial sum" mutex until we are finished summarizing. */ pthread_mutex_lock(source->sum_finished); } else { memcpy(source, hash_datum->data, hash_datum->size); datum_free(hash_datum); /* We need this lock before zeroing metric sums. 
 */ pthread_mutex_lock(source->sum_finished); source->hosts_up = 0; source->hosts_down = 0; hash_foreach(source->metric_summary, zero_out_summary, NULL); } /* Edge has the same invariant as in fillmetric(). */ edge = 0; source->owner = -1; source->latlong = -1; source->url = -1; /* Fill in cluster attributes. */ for(i = 0; attr[i]; i+=2) { xt = in_xml_list (attr[i], strlen(attr[i])); if (!xt) continue; switch( xt->tag ) { case OWNER_TAG: source->owner = addstring(source->strings, &edge, attr[i+1]); break; case LATLONG_TAG: source->latlong = addstring(source->strings, &edge, attr[i+1]); break; case URL_TAG: source->url = addstring(source->strings, &edge, attr[i+1]); break; case LOCALTIME_TAG: source->localtime = strtoul(attr[i+1], (char **) NULL, 10); break; default: break; } } source->stringslen = edge; return 0; }
/*
 * expat start-handler for <GRID>.  Ignored in non-scalable mode.  When we
 * are the authority (i.e. not inside a nested grid): looks the grid up in
 * the root hash by NAME; a new grid gets a fresh Source_t (summary hash,
 * new sum_finished mutex), an existing one is copied out and its summary
 * zeroed.  In BOTH paths the sum_finished mutex is LOCKED on return —
 * endElement_GRID is responsible for the summary swap / unlock.
 * AUTHORITY and LOCALTIME attributes are packed into the string area via
 * addstring.  grid_depth is incremented last, after all processing.
 * Returns 0 on success, 1 when the summary hash cannot be created.
 */
static int startElement_GRID(void *data, const char *el, const char **attr) { xmldata_t *xmldata = (xmldata_t *)data; struct xml_tag *xt; datum_t *hash_datum = NULL; datum_t hashkey; const char *name = NULL; int edge; int i; Source_t *source; /* In non-scalable mode, we ignore GRIDs. */ if (!gmetad_config.scalable_mode) return 0; /* We do not keep info on nested grids. */ if (authority_mode(xmldata)) { /* Get name for hash key */ for(i = 0; attr[i]; i+=2) { xt = in_xml_list (attr[i], strlen(attr[i])); if (!xt) continue; if (xt->tag == NAME_TAG) { name = attr[i+1]; xmldata->sourcename = realloc(xmldata->sourcename, strlen(name)+1); strcpy(xmldata->sourcename, name); hashkey.data = (void*) name; hashkey.size = strlen(name) + 1; } } source = &(xmldata->source); /* Query the hash table for this cluster */ hash_datum = hash_lookup(&hashkey, xmldata->root); if (!hash_datum) { /* New Cluster */ memset((void*) source, 0, sizeof(*source)); source->id = GRID_NODE; source->report_start = source_report_start; source->report_end = source_report_end; source->metric_summary = hash_create(DEFAULT_METRICSIZE); if (!source->metric_summary) { err_msg("Could not create summary hash for cluster %s", name); return 1; } source->ds = xmldata->ds; /* Initialize the partial sum lock */ source->sum_finished = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(source->sum_finished, NULL); /* Grab the "partial sum" mutex until we are finished * summarizing. */ pthread_mutex_lock(source->sum_finished); } else { /* Found Cluster. Put into our Source buffer in xmldata. */ memcpy(source, hash_datum->data, hash_datum->size); datum_free(hash_datum); /* Grab the "partial sum" mutex until we are finished * summarizing. Needs to be done asap.*/ pthread_mutex_lock(source->sum_finished); source->hosts_up = 0; source->hosts_down = 0; hash_foreach(source->metric_summary, zero_out_summary, NULL); } /* Edge has the same invariant as in fillmetric(). 
 */ edge = 0; /* Fill in grid attributes. */ for(i = 0; attr[i]; i+=2) { xt = in_xml_list(attr[i], strlen(attr[i])); if (!xt) continue; switch( xt->tag ) { case AUTHORITY_TAG: source->authority_ptr = addstring(source->strings, &edge, attr[i+1]); break; case LOCALTIME_TAG: source->localtime = strtoul(attr[i+1], (char **) NULL, 10); break; default: break; } } source->stringslen = edge; } /* Must happen after all processing of this tag. */ xmldata->grid_depth++; debug_msg("Found a <GRID>, depth is now %d", xmldata->grid_depth); return 0; }
static void _appbroker_calls(AppBroker * appbroker) { fputs("\n\n/* calls */\n", appbroker->fp); hash_foreach(appbroker->config, (HashForeach)_appbroker_foreach_call, appbroker); }
/*
 * sacerdoti: This function does a tree walk while respecting the filter
 * path.  Will return valid XML even if we have chosen a subtree.  Since
 * tree depth is bounded, this function guarantees O(1) search time.
 *
 * Recursive walk: path==NULL is the base case (dump the whole subtree via
 * tree_report); otherwise the next '/'-delimited path element is looked
 * up among the node's children and recursed into.  report_start/
 * report_end bracket the body so the emitted tags stay balanced; an
 * element missing from a non-HTTP client still dumps the current subtree,
 * while an HTTP client gets nothing.  Returns 0 on success, 1 on a write
 * error or allocation failure.
 * NOTE(review): the start/end tag pairing depends on the exact early-
 * return placement — keep rc checks where they are if editing.
 */
/* sacerdoti: This function does a tree walk while respecting the filter path. * Will return valid XML even if we have chosen a subtree. Since tree depth is * bounded, this function guarantees O(1) search time. */ static int process_path (client_t *client, char *path, datum_t *myroot, datum_t *key) { char *p, *q, *pathend; char *element; int rc, len; datum_t *found; datum_t findkey; Generic_t *node; node = (Generic_t*) myroot->data; /* Base case */ if (!path) { /* Show the subtree. */ applyfilter(client, node); if (node->children) { /* Allow this to stop early (return code = 1) */ hash_foreach(node->children, tree_report, (void*) client); } return 0; } /* Start tag */ if (node->report_start) { rc = node->report_start(node, key, client, NULL); if (rc) return 1; } /* Subtree body */ pathend = path + strlen(path); p = path+1; if (!node->children || p >= pathend) rc = process_path(client, 0, myroot, NULL); else { /* Advance to the next element along path. */ q = strchr(p, '/'); if (!q) q=pathend; /* len is limited in size by REQUESTLEN through readline() */ len = q-p; element = malloc(len + 1); if ( element == NULL ) return 1; strncpy(element, p, len); element[len] = '\0'; /* err_msg("Skipping to %s (%d)", element, len); */ /* look for element in hash table. */ findkey.data = element; findkey.size = len+1; found = hash_lookup(&findkey, node->children); if (found) { /* err_msg("Found %s", element); */ rc = process_path(client, q, found, &findkey); datum_free(found); } else if (!client->http) { /* report this element */ rc = process_path(client, 0, myroot, NULL); } else { /* element not found */ rc = 0; } free(element); } if (rc) return 1; /* End tag */ if (node->report_end) { rc = node->report_end(node, client, NULL); } return rc; }