// want to verify that we can observe the correct end counter value // in the face of concurrent updates static void concurrentCounters(void) { int i, num_threads = 4; struct counter_data data = { 0, 100000, 0, NULL }; pthread_t threads[num_threads]; data.scope = ph_counter_scope_define(NULL, "testConcurrentCounters", 1); is_true(data.scope != NULL); data.slot = ph_counter_scope_register_counter(data.scope, "dummy"); is(0, data.slot); for (i = 0; i < num_threads; i++) { pthread_create(&threads[i], NULL, spin_and_count, &data); } /* unleash the threads on the data */ ck_pr_store_uint(&data.barrier, 1); for (i = 0; i < num_threads; i++) { void *unused; pthread_join(threads[i], &unused); } is(num_threads * data.iters, ph_counter_scope_get(data.scope, data.slot)); }
// Return the counter scope for a facility name, creating it under the
// top-level memory scope on first use. Caller owns the returned reference.
static ph_counter_scope_t *resolve_facility(const char *fac)
{
  ph_counter_scope_t *scope = ph_counter_scope_resolve(memory_scope, fac);

  // Fall through to defining the scope only when it doesn't exist yet
  return scope ? scope : ph_counter_scope_define(memory_scope, fac, 0);
}
// One-time initialization of the memtype registry: allocate the fixed-size
// memtype table and define the root "memory" counter scope. Panics on any
// failure since the allocator cannot operate without these.
static void memory_init(void)
{
  memtypes_size = 1024;

  memtypes = malloc(sizeof(*memtypes) * memtypes_size);
  if (memtypes == NULL) {
    memory_panic("failed to allocate memtypes array");
  }

  memory_scope = ph_counter_scope_define(NULL, "memory", 16);
  if (memory_scope == NULL) {
    memory_panic("failed to define memory scope");
  }
}
ph_memtype_t ph_memtype_register(const ph_memtype_def_t *def) { ph_memtype_t mt; ph_counter_scope_t *scope, *fac_scope; struct mem_type *mem_type; const char **names; int num_slots; fac_scope = resolve_facility(def->facility); if (!fac_scope) { return PH_MEMTYPE_INVALID; } scope = ph_counter_scope_define(fac_scope, def->name, MEM_COUNTER_SLOTS); ph_counter_scope_delref(fac_scope); if (!scope) { return PH_MEMTYPE_INVALID; } mt = ck_pr_faa_int(&next_memtype, 1); if ((uint32_t)mt >= memtypes_size) { memory_panic("You need to recompile libphenom with memtypes_size = %d", 2 * memtypes_size); } mem_type = &memtypes[mt]; memset(mem_type, 0, sizeof(*mem_type)); mem_type->def = *def; mem_type->def.facility = strdup(def->facility); mem_type->def.name = strdup(def->name); mem_type->scope = scope; if (mem_type->def.item_size == 0) { names = vsize_counter_names; num_slots = MEM_COUNTER_SLOTS; } else { names = sized_counter_names; num_slots = MEM_COUNTER_SLOTS - 1; } if (!ph_counter_scope_register_counter_block( scope, num_slots, 0, names)) { memory_panic("failed to register counter block for memory scope %s", def->name); } return mt; }
// Exercises the core counter API end to end: scope creation, counter
// registration (single and block), direct and block-based adds, bulk adds,
// view snapshots, child scopes, name resolution, and global iteration.
static void basicCounterFunctionality(void)
{
  ph_counter_scope_t *scope;

  scope = ph_counter_scope_define(NULL, "test1", 24);
  is_true(scope != NULL);
  is_string("test1", ph_counter_scope_get_name(scope));

  // First registered counter gets slot 0
  uint8_t slot = ph_counter_scope_register_counter(scope, "dummy");
  is(0, slot);

  // Successive adds accumulate: 1 + 1 + 3 = 5
  ph_counter_scope_add(scope, slot, 1);
  is(1, ph_counter_scope_get(scope, slot));

  ph_counter_scope_add(scope, slot, 1);
  is(2, ph_counter_scope_get(scope, slot));

  ph_counter_scope_add(scope, slot, 3);
  is(5, ph_counter_scope_get(scope, slot));

  /* register some more slots */
  const char *names[2] = {"sent", "recd"};
  is_true(ph_counter_scope_register_counter_block(
        scope, 2, slot + 1, names));

  // A block handle allows adds without re-resolving the scope each time
  ph_counter_block_t *block = ph_counter_block_open(scope);
  is_true(block != NULL);

  ph_counter_block_add(block, slot + 1, 3);
  is(3, ph_counter_scope_get(scope, slot + 1));

  // C++, clogging up code with casts since the last century
  uint8_t bulk_slots[2] = { (uint8_t)(slot + 1), (uint8_t)(slot + 2) };
  int64_t values[2] = { 1, 5 };
  // "sent" becomes 3 + 1 = 4, "recd" becomes 0 + 5 = 5
  ph_counter_block_bulk_add(block, 2, bulk_slots, values);
  is(4, ph_counter_scope_get(scope, slot + 1));
  is(5, ph_counter_scope_get(scope, slot + 2));

  // A view snapshots every slot's value and name in slot order
  uint8_t num_slots;
  int64_t view_slots[10];
  const char *view_names[10];

  num_slots = ph_counter_scope_get_view(scope, 10,
      view_slots, view_names);
  is(3, num_slots);
  is(5, view_slots[0]);
  is(4, view_slots[1]);
  is(5, view_slots[2]);
  is_string("dummy", view_names[0]);
  is_string("sent", view_names[1]);
  is_string("recd", view_names[2]);

  ph_counter_scope_t *kid_scope;

  // Verify that attempting to define the same scope twice fails
  kid_scope = ph_counter_scope_define(NULL, "test1", 24);
  is_true(kid_scope == NULL);

  // Get ourselves a real child; its full name is dotted with the parent's
  kid_scope = ph_counter_scope_define(scope, "child", 8);
  is_true(kid_scope != NULL);
  is_string("test1.child", ph_counter_scope_get_name(kid_scope));

  // resolve() finds scopes by full dotted name from the root
  ph_counter_scope_t *resolved;

  resolved = ph_counter_scope_resolve(NULL, "test1");
  is(scope, resolved);

  resolved = ph_counter_scope_resolve(NULL, "test1.child");
  is(kid_scope, resolved);

  ph_counter_scope_register_counter(kid_scope, "w00t");

  // Test iteration
  struct counter_name_val counter_data[16];
  int n_counters = 0;
  ph_counter_scope_iterator_t iter;

  // Collect all counter data; it is returned in an undefined order.
  // For the sake of testing we want to order it, so we collect the data
  // and then sort it
  ph_counter_scope_iterator_init(&iter);
  ph_counter_scope_t *iter_scope;
  while ((iter_scope = ph_counter_scope_iterator_next(&iter)) != NULL) {
    int i;

    // Skip scopes from other tests; only "test1" and its children matter
    if (strncmp(ph_counter_scope_get_name(iter_scope), "test1", 5)) {
      continue;
    }

    num_slots = ph_counter_scope_get_view(iter_scope, 10,
        view_slots, view_names);
    for (i = 0; i < num_slots; i++) {
      counter_data[n_counters].scope_name =
        ph_counter_scope_get_name(iter_scope);
      counter_data[n_counters].name = view_names[i];
      counter_data[n_counters].val = view_slots[i];
      n_counters++;
    }

    // The iterator hands out a reference per scope; release it
    ph_counter_scope_delref(iter_scope);
  }

  qsort(counter_data, n_counters, sizeof(struct counter_name_val),
      compare_counter_name_val);

  // Expected totals from all the adds above, in sorted order
  struct counter_name_val expected_data[] = {
    { "test1", "dummy", 5 },
    { "test1", "recd", 5 },
    { "test1", "sent", 4 },
    { "test1.child", "w00t", 0 },
  };
  int num_expected = sizeof(expected_data) / sizeof(expected_data[0]);

  is_int(num_expected, n_counters);
  for (int i = 0; i < n_counters; i++) {
    is_string(expected_data[i].scope_name,
        counter_data[i].scope_name);
    is_string(expected_data[i].name, counter_data[i].name);
    is(expected_data[i].val, counter_data[i].val);
    diag("%s.%s = %" PRIi64, counter_data[i].scope_name,
        counter_data[i].name, counter_data[i].val);
  }
}
/** Register `num_types` memory types that all belong to the same facility.
 *
 * Allocates a contiguous run of memtype ids starting at the returned value;
 * if `types` is non-NULL it is filled with the id of each registered type.
 *
 * Returns the first memtype id, or PH_MEMTYPE_INVALID if the definitions
 * span more than one facility or a counter scope cannot be created. Panics
 * if the memtype table is full or bookkeeping allocations fail.
 */
ph_memtype_t ph_memtype_register_block(
    uint8_t num_types,
    const ph_memtype_def_t *defs,
    ph_memtype_t *types)
{
  int i;
  ph_counter_scope_t *fac_scope, *scope = NULL;
  ph_memtype_t mt;
  struct mem_type *mem_type;
  const char **names;
  uint32_t num_slots;

  /* must all be same facility */
  for (i = 0; i < num_types; i++) {
    if (strcmp(defs[0].facility, defs[i].facility)) {
      return PH_MEMTYPE_INVALID;
    }
  }

  fac_scope = resolve_facility(defs[0].facility);
  if (!fac_scope) {
    return PH_MEMTYPE_INVALID;
  }

  // Reserve a contiguous run of ids (faa returns the old value)
  mt = ck_pr_faa_int(&next_memtype, num_types);
  if ((uint32_t)mt >= memtypes_size) {
    memory_panic("You need to recompile libphenom with memtypes_size = %d",
        2 * memtypes_size);
  }

  for (i = 0; i < num_types; i++) {
    mem_type = &memtypes[mt + i];
    memset(mem_type, 0, sizeof(*mem_type));
    mem_type->def = defs[i];
    if (i == 0) {
      mem_type->def.facility = strdup(defs[0].facility);
    } else {
      // All entries in the run share the first entry's facility string
      mem_type->def.facility = memtypes[mt].def.facility;
    }
    mem_type->def.name = strdup(defs[i].name);
    if (!mem_type->def.facility || !mem_type->def.name) {
      memory_panic("failed to copy memtype name strings for %s",
          defs[i].name);
    }

    scope = ph_counter_scope_define(fac_scope,
        mem_type->def.name, MEM_COUNTER_SLOTS);
    if (!scope) {
      // FIXME: cleaner error handling (earlier entries in the run are
      // left registered). Previously this path also leaked the facility
      // scope reference; release it before bailing out.
      ph_counter_scope_delref(fac_scope);
      return PH_MEMTYPE_INVALID;
    }
    mem_type->scope = scope;

    if (mem_type->def.item_size == 0) {
      // Variable-size types track an extra counter slot
      names = vsize_counter_names;
      num_slots = MEM_COUNTER_SLOTS;
    } else {
      names = sized_counter_names;
      num_slots = MEM_COUNTER_SLOTS - 1;
    }

    mem_type->first_slot = ph_counter_scope_get_num_slots(scope);

    if (!ph_counter_scope_register_counter_block(
          scope, num_slots, 0, names)) {
      memory_panic("failed to register counter block for memory scope %s",
          mem_type->def.name);
    }
  }

  if (types) {
    for (i = 0; i < num_types; i++) {
      types[i] = mt + i;
    }
  }

  ph_counter_scope_delref(fac_scope);

  return mt;
}