// Simulate static void bench_pending(void) { // These parameters give us 262140 items to track const size_t tree_depth = 7; const size_t num_files_per_dir = 8; const size_t num_dirs_per_dir = 4; w_string_t *root_name = w_string_new("/some/path"); struct pending_list list; const size_t alloc_size = 280000; struct timeval start, end; list.pending = calloc(alloc_size, sizeof(struct watchman_pending_fs)); list.avail = list.pending; list.end = list.pending + alloc_size; // Build a list ordered from the root (top) down to the leaves. build_list(&list, root_name, tree_depth, num_files_per_dir, num_dirs_per_dir); diag("built list with %u items", list.avail - list.pending); // Benchmark insertion in top-down order. { struct watchman_pending_collection coll; struct watchman_pending_fs *item; size_t drained = 0; w_pending_coll_init(&coll); gettimeofday(&start, NULL); for (item = list.pending; item < list.avail; item++) { w_pending_coll_add(&coll, item->path, item->now, item->flags); } drained = process_items(&coll); gettimeofday(&end, NULL); diag("took %.3fs to insert %u items into pending coll", w_timeval_diff(start, end), drained); } // and now in reverse order; this is from the leaves of the filesystem // tree up to the root, or bottom-up. This simulates the workload of // a recursive delete of a filesystem tree. { struct watchman_pending_collection coll; struct watchman_pending_fs *item; size_t drained = 0; w_pending_coll_init(&coll); gettimeofday(&start, NULL); for (item = list.avail - 1; item >= list.pending; item--) { w_pending_coll_add(&coll, item->path, item->now, item->flags); } drained = process_items(&coll); gettimeofday(&end, NULL); diag("took %.3fs to reverse insert %u items into pending coll", w_timeval_diff(start, end), drained); } }
static void process(struct cconf *config) { int i; struct buffer *data; proc_cache_purge(PROC_CACHE_PURGE_INTERVAL); dlhist_purge(DLHIST_PURGE_INTERVAL); for(i=0; i < config->nr; i++) { struct target *t = config->target + i; rss_t rss; data = http_fetch_page(t->src); if (!data) continue; rss = rss_parse(data->block, data->len); if (!rss) { error("failed to parse rss: %s", t->src); continue; } process_items(rss, t); rss_free(rss); http_free(data); } }
/******************************************************************************
 *                                                                            *
 * Function: node_history                                                     *
 *                                                                            *
 * Purpose: process new history received from a slave node                    *
 *                                                                            *
 * Parameters: data    - [IN] received text payload; modified in place        *
 *                       (newlines are temporarily NUL-terminated while       *
 *                       each record line is parsed, then restored)           *
 *             datalen - [IN] payload length, used for logging only           *
 *                                                                            *
 * Return value: SUCCEED - processed successfully                             *
 *               FAIL - an error occurred                                     *
 *                                                                            *
 * Author: Alexei Vladishev                                                   *
 *                                                                            *
 * Comments: the first payload line is a header holding the constant          *
 *           'History', the sender node id, the target node id and the        *
 *           table name, separated by ZBX_DM_DELIMITER; every following       *
 *           line is one record for that table. The whole batch is applied    *
 *           inside a single DB transaction.                                  *
 *                                                                            *
 ******************************************************************************/
int node_history(char *data, size_t datalen)
{
	const char		*r;
	char			*newline = NULL;
	char			*pos;
	int			sender_nodeid = 0, nodeid = 0, firstline = 1, events = 0, history = 0, acknowledges = 0;
	const ZBX_TABLE		*table_sync = NULL, *table = NULL;
	int			res = SUCCEED;
	char			*sql1 = NULL, *sql2 = NULL, *sql3 = NULL;
	size_t			sql1_alloc, sql2_alloc, sql3_alloc;
	size_t			sql1_offset, sql2_offset, sql3_offset;
	zbx_vector_uint64_t	ack_eventids;

	assert(data);

	zabbix_log(LOG_LEVEL_DEBUG, "In node_history()");

	/* NOTE(review): buffer/buffer_alloc and tmp/tmp_alloc are not declared
	 * in this function — they are file-scope scratch buffers, allocated
	 * here and released at the bottom. */
	buffer_alloc = 4 * ZBX_KIBIBYTE;
	sql1_alloc = 32 * ZBX_KIBIBYTE;
	sql2_alloc = 32 * ZBX_KIBIBYTE;
	sql3_alloc = 32 * ZBX_KIBIBYTE;
	tmp_alloc = 4 * ZBX_KIBIBYTE;
	buffer = zbx_malloc(buffer, buffer_alloc);
	sql1 = zbx_malloc(sql1, sql1_alloc);
	sql2 = zbx_malloc(sql2, sql2_alloc);
	sql3 = zbx_malloc(sql3, sql3_alloc);
	tmp = zbx_malloc(tmp, tmp_alloc);

	zbx_vector_uint64_create(&ack_eventids);

	/* the whole batch is committed or rolled back as one transaction */
	DBbegin();

	for (r = data; *r != '\0' && res == SUCCEED;)
	{
		/* temporarily terminate the current line; restored below */
		if (NULL != (newline = strchr(r, '\n')))
			*newline = '\0';

		if (1 == firstline)
		{
			zbx_get_next_field(&r, &buffer, &buffer_alloc, ZBX_DM_DELIMITER);	/* constant 'History' */
			zbx_get_next_field(&r, &buffer, &buffer_alloc, ZBX_DM_DELIMITER);	/* sender_nodeid */
			sender_nodeid=atoi(buffer);
			zbx_get_next_field(&r, &buffer, &buffer_alloc, ZBX_DM_DELIMITER);	/* nodeid */
			nodeid=atoi(buffer);
			zbx_get_next_field(&r, &buffer, &buffer_alloc, ZBX_DM_DELIMITER);	/* tablename */

			/* reject payloads that did not come from a direct slave */
			if (FAIL == is_direct_slave_node(sender_nodeid))
			{
				zabbix_log(LOG_LEVEL_ERR, "NODE %d: Received data from node %d"
						" that is not a direct slave node",
						CONFIG_NODEID, sender_nodeid);
				res = FAIL;
			}

			if (FAIL == is_slave_node(CONFIG_NODEID, nodeid))
			{
				zabbix_log(LOG_LEVEL_ERR, "NODE %d: Received history for unknown slave node %d",
						CONFIG_NODEID, nodeid);
				res = FAIL;
			}

			table = DBget_table(buffer);

			/* only history-type tables may be synced this way */
			if (NULL != table && 0 == (table->flags & (ZBX_HISTORY | ZBX_HISTORY_SYNC)))
				table = NULL;

			if (NULL != table && 0 != (table->flags & ZBX_HISTORY_SYNC))
			{
				/* a "<name>_sync" table: records target the base table,
				 * and table_sync keeps the original for the master copy */
				table_sync = table;

				if (NULL != (pos = strstr(buffer, "_sync")))
				{
					*pos = '\0';
					table = DBget_table(buffer);
				}
			}

			if (NULL == table)
			{
				zabbix_log(LOG_LEVEL_ERR, "NODE %d: Invalid received data: unknown tablename \"%s\"",
						CONFIG_NODEID, buffer);
				res = FAIL;
			}
			else
			{
				/* flags steering the per-record branch below */
				if (0 == strcmp(table->table, "events"))
					events = 1;
				if (0 == strncmp(table->table, "history", 7))
					history = 1;
				if (0 == strcmp(table->table, "acknowledges"))
					acknowledges = 1;
			}

			/* log the batch summary only when record lines follow */
			if (NULL != newline)
			{
				zabbix_log(LOG_LEVEL_WARNING, "NODE %d: Received %s from node %d for node %d datalen " ZBX_FS_SIZE_T,
						CONFIG_NODEID, buffer, sender_nodeid, nodeid, (zbx_fs_size_t)datalen);
			}

			firstline = 0;
			sql1_offset = 0;
			sql2_offset = 0;
			sql3_offset = 0;
		}
		else if (NULL != table)
		{
			if (events)
			{
				res = process_record_event(sender_nodeid, nodeid, table, r);
			}
			else
			{
				/* NOTE(review): "newline ? 0 : 1" is passed on every call —
				 * presumably a "last record of the batch" flag; confirm
				 * against process_record()/process_items(). */
				res = process_record(&sql1, &sql1_alloc, &sql1_offset, sender_nodeid,
						nodeid, table, r, newline ? 0 : 1, acknowledges, &ack_eventids);

				if (SUCCEED == res && 0 != history)
				{
					res = process_items(&sql2, &sql2_alloc, &sql2_offset, sender_nodeid,
							nodeid, table, r, newline ? 0 : 1);
				}

				if (SUCCEED == res && NULL != table_sync && 0 != CONFIG_MASTER_NODEID)
				{
					res = process_record(&sql3, &sql3_alloc, &sql3_offset, sender_nodeid,
							nodeid, table_sync, r, newline ? 0 : 1, 0, NULL);
				}
			}
		}

		/* restore the newline and advance to the next record line */
		if (newline != NULL)
		{
			*newline = '\n';
			r = newline + 1;
		}
		else
			break;
	}

	if (SUCCEED == res)
		DBcommit();
	else
		DBrollback();

	zbx_vector_uint64_destroy(&ack_eventids);

	zbx_free(tmp);
	zbx_free(sql1);
	zbx_free(sql2);
	zbx_free(sql3);
	zbx_free(buffer);

	return res;
}
/*
 * Format the value of every laid-out column of one journal feed record,
 * dispatching on the column's source table.
 *
 * Fixes in this revision:
 *  - Every "&currency_info" argument had been corrupted into "¤cy_info"
 *    (an HTML-entity mangling of "&curren"); the address-of operator is
 *    restored so helpers receive a pointer to the local JNL_CURRENCY.
 *  - Removed the unused locals `table` and `field` (computed, never read).
 */
void process_record(JNL_FEED_DESC *detail, short currency_code, tiny is_detail)
{
	JNL_CURRENCY currency_info;
	int i = 0;

	/* Resolve the currency attributes once; all columns share them. */
	get_currency_info(currency_code, &currency_info);

	for (i = 0; i < detail->count; i++)
	{
		switch (detail->layout[i].table)
		{
			case TABLE_JNL_TRANS:
			case TABLE_JNL_DETAIL:
			{
				if (g_iFeedGrouping && is_detail)
				{
					/* NOTE(review): when grouping is on, the usage/amount
					 * fields of detail rows are skipped here — presumably
					 * they are aggregated elsewhere; confirm with caller. */
					if (detail->layout[i].field != FIELD_JT_USAGE_UNITS &&
						detail->layout[i].field != FIELD_JD_USAGE_UNITS &&
						detail->layout[i].field != FIELD_JT_USAGE_ITEMS &&
						detail->layout[i].field != FIELD_JD_USAGE_ITEMS &&
						detail->layout[i].field != FIELD_JT_REPORTED_AMOUNT &&
						detail->layout[i].field != FIELD_JD_REPORTED_AMOUNT)
					{
						process_items(&detail->layout[i], detail->values[i].value, &currency_info);
					}
				}
				else
					process_items(&detail->layout[i], detail->values[i].value, &currency_info);
				break;
			}
			case TABLE_JNL_CUSTOM:
			case TABLE_OWNING_COST_CTR:
			case TABLE_REV_RCV_COST_CTR:
			case TABLE_JNL_EMF_BOOK:
			{
				process_items(&detail->layout[i], detail->values[i].value, &currency_info);
				break;
			}
			case TABLE_RATE_CURRENCY:
			{
				process_currency_items(detail->layout[i], &currency_info, detail->values[i].value);
				break;
			}
			case TABLE_JNL_SUM_TOTALS:
				break;
			case TABLE_PACK_IDS:
			{
				/* NOTE(review): "%.03d" pads to 3 digits but does not cap
				 * the value — if (ref%1000)+counter reaches 1000 the id
				 * grows past "Jnnn"; confirm the field is wide enough. */
				sprintf(detail->values[i].value, "J%.03d",
						(g_iJnl_ref_no%1000) + s_iPackCounter);
				process_jnl_field_trim(detail->layout[i].trim,
						detail->values[i].value, detail->layout[i].field_size);
				break;
			}
			case TABLE_FILE_LINE_COUNTER:
				/* Count a line for ungrouped feeds, or for grouped feeds'
				 * non-detail rows only. */
				if ( (g_iFeedGrouping == FALSE) ||
					 (g_iFeedGrouping == TRUE && is_detail == FALSE ) )
				{
					detail->LineCounter++;
					sprintf(detail->values[i].value, "%d", detail->LineCounter);
					process_jnl_field_trim(detail->layout[i].trim,
							detail->values[i].value, detail->layout[i].field_size);
				}
				break;
			case TABLE_JNL_CODES:
			case TABLE_JNL_RUNS_STATUS:
			case TABLE_JNL_RUN_DT:
			case TABLE_JNL_HARDCODED:
			default:
				/* no per-record formatting for these tables */
				break;
		}
	}
}