ram_reply_t ramlin_mkbarrier(ramlin_barrier_t *barrier_arg,
      size_t capacity_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);
   RAM_FAIL_NOTZERO(capacity_arg);

   barrier_arg->ramlinb_capacity = capacity_arg;
   barrier_arg->ramlinb_vacancy = capacity_arg;
   barrier_arg->ramlinb_cycle = 0;
   RAM_FAIL_TRAP(ramuix_mkmutex(&barrier_arg->ramlinb_mutex));
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         0 == pthread_cond_init(&barrier_arg->ramlinb_cond, NULL));

   return RAM_REPLY_OK;
}
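/* a usage sketch, under the assumption that a companion wait operation
 * (not shown in this section) blocks callers until *capacity_arg* threads
 * have arrived:
 *
 *    ramlin_barrier_t barrier;
 *
 *    RAM_FAIL_TRAP(ramlin_mkbarrier(&barrier, worker_count));
 *    ...each worker then waits on the barrier before starting work, so
 *    all workers begin the timed section together.
 */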
ram_reply_t initdefaults(ramtest_params_t *params_arg)
{
   RAM_FAIL_NOTNULL(params_arg);

   memset(params_arg, 0, sizeof(*params_arg));
   params_arg->ramtestp_alloccount = DEFAULT_ALLOCATION_COUNT;
   /* if no thread count is specified, i'll allow the framework to
    * calculate it itself. */
   params_arg->ramtestp_threadcount = 0;
   params_arg->ramtestp_mallocchance = DEFAULT_MALLOC_CHANCE;
   params_arg->ramtestp_minsize = DEFAULT_MINIMUM_ALLOCATION_SIZE;
   params_arg->ramtestp_maxsize = DEFAULT_MAXIMUM_ALLOCATION_SIZE;

   return RAM_REPLY_OK;
}
ram_reply_t ramwin_mktlskey(ramwin_tlskey_t *key_arg)
{
   ramwin_tlskey_t k = RAMWIN_NILTLSKEY;

   RAM_FAIL_NOTNULL(key_arg);
   *key_arg = RAMWIN_NILTLSKEY;

   k = TlsAlloc();
   if (TLS_OUT_OF_INDEXES == k)
      return RAM_REPLY_RESOURCEFAIL;
   else
   {
      *key_arg = k;
      return RAM_REPLY_OK;
   }
}
ram_reply_t ramtest_chkfill(char *ptr_arg, size_t sz_arg)
{
   char *p = NULL, *z = NULL;

   RAM_FAIL_NOTNULL(ptr_arg);
   RAM_FAIL_NOTZERO(sz_arg);

   /* the fill pattern is the low byte of the allocation size; scan until
    * the first mismatch or the end of the block. */
   for (p = ptr_arg, z = ptr_arg + sz_arg;
         p < z && ((char)(sz_arg & 0xff)) == *p;
         ++p)
      continue;
   if (p != z)
      return RAM_REPLY_CORRUPT;

   return RAM_REPLY_OK;
}
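/* a minimal sketch of how ramtest_chkfill() pairs with a fill operation.
 * this helper is illustrative only (it is not part of the original
 * interface); it assumes nothing beyond memset() and the fill pattern
 * checked above: every byte holds the low byte of the allocation size. */
static ram_reply_t example_fillandcheck(char *ptr_arg, size_t sz_arg)
{
   RAM_FAIL_NOTNULL(ptr_arg);
   RAM_FAIL_NOTZERO(sz_arg);

   /* write the expected pattern, then verify it survives intact. */
   memset(ptr_arg, (char)(sz_arg & 0xff), sz_arg);
   return ramtest_chkfill(ptr_arg, sz_arg);
}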
ram_reply_t ramtest_fintest(ramtest_test_t *test_arg)
{
   RAM_FAIL_NOTNULL(test_arg);

   if (NULL != test_arg->ramtestt_records)
      free(test_arg->ramtestt_records);
   if (NULL != test_arg->ramtestt_threads)
   {
      /* TODO: tear down thread structures. */
      free(test_arg->ramtestt_threads);
   }
   if (NULL != test_arg->ramtestt_sequence)
      free(test_arg->ramtestt_sequence);

   return RAM_REPLY_OK;
}
ram_reply_t ramtest_inittest(ramtest_test_t *test_arg,
      const ramtest_params_t *params_arg)
{
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(test_arg);
   memset(test_arg, 0, sizeof(*test_arg));

   e = ramtest_inittest2(test_arg, params_arg);
   if (RAM_REPLY_OK == e)
      return RAM_REPLY_OK;
   else
   {
      /* if initialization failed partway through, i tear down whatever
       * was constructed before propagating the error. */
      RAM_FAIL_PANIC(ramtest_fintest(test_arg));
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   }
}
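/* lifecycle sketch: *ramtest_inittest()* pairs with *ramtest_fintest()*.
 * on success, the caller owns the teardown; on failure, the teardown has
 * already happened above, so the caller must not call *ramtest_fintest()*
 * again. the driver that actually spawns the test threads is assumed here
 * and not shown in this section:
 *
 *    ramtest_test_t test;
 *
 *    RAM_FAIL_TRAP(ramtest_inittest(&test, &params));
 *    ...run the test threads...
 *    RAM_FAIL_TRAP(ramtest_fintest(&test));
 */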
ram_reply_t ramtest_randuint32(uint32_t *result_arg, uint32_t n0_arg,
      uint32_t n1_arg)
{
   uint32_t n = 0;

   RAM_FAIL_NOTNULL(result_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, n0_arg < n1_arg);

   /* these assertions test the boundaries of the scaling formula. */
   assert(RAMTEST_SCALERAND(uint32_t, 0, n0_arg, n1_arg) >= n0_arg);
   assert(RAMTEST_SCALERAND(uint32_t, RAND_MAX, n0_arg, n1_arg) < n1_arg);
   n = RAMTEST_SCALERAND(uint32_t, rand(), n0_arg, n1_arg);
   assert(n >= n0_arg);
   assert(n < n1_arg);

   *result_arg = n;
   return RAM_REPLY_OK;
}
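/* a usage sketch: drawing a value in [1, 7) yields 1 through 6, like a
 * die roll. the bounds form a half-open interval, which is why the caller
 * passes 7 (not 6) as the exclusive upper bound:
 *
 *    uint32_t roll = 0;
 *
 *    RAM_FAIL_TRAP(ramtest_randuint32(&roll, 1, 7));
 *    assert(roll >= 1 && roll < 7);
 */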
ram_reply_t ramvec_chkinv(ramlist_list_t *list_arg, void *context_arg)
{
   const ramvec_node_t *node = NULL;
   const ramvec_chkcontext_t *c = (ramvec_chkcontext_t *)context_arg;

   RAM_FAIL_NOTNULL(list_arg);
   assert(context_arg != NULL);

   RAM_FAIL_TRAP(ramlist_chklist(list_arg));
   node = RAM_CAST_STRUCTBASE(ramvec_node_t, ramvecn_inv, list_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT,
         c->ramveccc_pool == node->ramvecn_vpool);
   /* if additional checking was specified, pass control to that function
    * with its associated context. */
   if (c->ramveccc_chknode)
      RAM_FAIL_TRAP(c->ramveccc_chknode(node));

   return RAM_REPLY_AGAIN;
}
ram_reply_t ramwin_rcltls(void **value_arg, ramwin_tlskey_t key_arg)
{
   void *p = NULL;

   RAM_FAIL_NOTNULL(value_arg);
   *value_arg = NULL;
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, key_arg != RAMWIN_NILTLSKEY);

   p = TlsGetValue(key_arg);
   /* NULL is an ambiguous return value. i must check whether an error
    * occurred to be certain. */
   /* TODO: TlsGetValue() doesn't check whether key_arg is valid, so i'd
    * need to implement this check (or ensure its validity) myself. */
   if (p || ERROR_SUCCESS == GetLastError())
   {
      *value_arg = p;
      return RAM_REPLY_OK;
   }
   else
      return RAM_REPLY_APIFAIL;
}
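/* a round-trip sketch, assuming *ramwin_tlskey_t* wraps a Win32 TLS index
 * directly (as the TlsGetValue() call above suggests), so a value can be
 * stored with the TlsSetValue() API; a ramwin_* store wrapper may exist
 * elsewhere but does not appear in this section:
 *
 *    ramwin_tlskey_t key = RAMWIN_NILTLSKEY;
 *    void *value = NULL;
 *    static int something = 42;
 *
 *    RAM_FAIL_TRAP(ramwin_mktlskey(&key));
 *    RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, TlsSetValue(key, &something));
 *    RAM_FAIL_TRAP(ramwin_rcltls(&value, key));
 *    assert(&something == value);
 */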
ram_reply_t acquire(ramtest_allocdesc_t *desc_arg, size_t size_arg,
      void *extra_arg, size_t threadidx_arg)
{
   void *p = NULL;

   RAM_FAIL_NOTNULL(desc_arg);
   memset(desc_arg, 0, sizeof(*desc_arg));
   RAM_FAIL_NOTZERO(size_arg);
   RAMANNOTATE_UNUSEDARG(extra_arg);
   RAMANNOTATE_UNUSEDARG(threadidx_arg);

   RAM_FAIL_TRAP(ram_default_acquire(&p, size_arg));
   desc_arg->ramtestad_ptr = (char *)p;
   /* the default module doesn't use explicit pool instances. i only need
    * to note whether i'm using the pool or not. i shall use the value of
    * 1 to indicate this. */
   desc_arg->ramtestad_pool = (void *)1;
   desc_arg->ramtestad_sz = size_arg;

   return RAM_REPLY_OK;
}
ram_reply_t ramvec_acquire(ramvec_node_t *node_arg, int isfull_arg)
{
   ramvec_pool_t *pool = NULL;

   RAM_FAIL_NOTNULL(node_arg);

   pool = node_arg->ramvecn_vpool;
   assert(pool != NULL);
   /* now, if the node is full, it becomes unavailable. i remove it from
    * the availability stack. i can ignore the return value of
    * 'ramlist_pop()' because access to it is already preserved through
    * 'pool->ramvecvp_avail'. */
   if (isfull_arg)
   {
      ramlist_list_t *unused = NULL;

      RAM_FAIL_TRAP(ramlist_pop(&unused, &node_arg->ramvecn_avail));
      RAM_FAIL_TRAP(ramlist_mknil(&node_arg->ramvecn_avail));
   }

   return RAM_REPLY_OK;
}
ram_reply_t ramtest_shuffle(void *array_arg, size_t size_arg,
      size_t count_arg)
{
   char *p = (char *)array_arg;
   size_t i = 0;

   RAM_FAIL_NOTNULL(array_arg);
   RAM_FAIL_NOTZERO(size_arg);

   if (0 < count_arg)
   {
      /* fisher-yates shuffle: element *i* must be allowed to swap with
       * itself, so *j* is drawn from [0, i] (i.e. the exclusive upper
       * bound is i + 1). drawing from [0, i) instead would produce only
       * cyclic permutations (sattolo's algorithm), which is biased. */
      for (i = count_arg - 1; i > 0; --i)
      {
         uint32_t j = 0;

         RAM_FAIL_TRAP(ramtest_randuint32(&j, 0, (uint32_t)i + 1));
         RAM_FAIL_TRAP(rammisc_swap(&p[i * size_arg], &p[j * size_arg],
               size_arg));
      }
   }

   return RAM_REPLY_OK;
}
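/* a minimal, illustrative use of ramtest_shuffle(): permuting a small
 * index array in place. this helper is not part of the original
 * interface. */
static ram_reply_t example_shuffleindices(void)
{
   size_t indices[] = { 0, 1, 2, 3, 4, 5 };

   /* after this call, *indices* contains the same six values in a
    * (pseudo)random order determined by the current rand() state. */
   RAM_FAIL_TRAP(ramtest_shuffle(indices, sizeof(indices[0]),
         sizeof(indices) / sizeof(indices[0])));
   return RAM_REPLY_OK;
}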
ram_reply_t ramtest_thread(void *arg)
{
   ramtest_start_t *start = (ramtest_start_t *)arg;
   ramtest_test_t *test = NULL;
   size_t threadidx = 0, threadid = 0;
   ram_reply_t e = RAM_REPLY_INSANE;
   size_t unused = 0;

   RAM_FAIL_NOTNULL(arg);

   test = start->ramtests_test;
   threadidx = start->ramtests_threadidx;
   /* i'm the sole consumer of this memory; *ramtest_start()* is the sole
    * producer. */
   free(start);
   threadid = threadidx + 1;

   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
         "[%zu] testing...\n", threadid));
   e = ramtest_thread2(test, threadidx);
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
         "[%zu] finished.\n", threadid));
   return e;
}
ram_reply_t ramtest_thread2(ramtest_test_t *test_arg, size_t threadidx_arg)
{
   size_t i = 0;
   ram_reply_t e = RAM_REPLY_INSANE;
   int cachedflag = 0;
   ramtest_allocdesc_t cached = {0};

   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   /* the sequence contains two entries per record (an allocation and a
    * deallocation), so there are twice *ramtestp_alloccount* positions
    * to draw. */
   while ((RAM_REPLY_OK == (e = ramtest_next(&i, test_arg)))
         && i < test_arg->ramtestt_params.ramtestp_alloccount * 2)
   {
      ramtest_allocrec_t *info = NULL;
      ramtest_allocdesc_t condemned = {0};

      info = &test_arg->ramtestt_records[test_arg->ramtestt_sequence[i]];
      /* i don't want to allocate while i'm holding the allocation record
       * mutex, so i'll prepare an allocation ahead of time. */
      if (!cachedflag)
      {
         RAM_FAIL_TRAP(ramtest_alloc(&cached, test_arg, threadidx_arg));
         /* i note that the cache is now occupied; otherwise, every
          * iteration would allocate anew and leak any unused cached
          * block. */
         cachedflag = 1;
      }
      /* there's actually a race condition between the call to
       * *ramtest_next()* and this point. the worst that could happen
       * (i think) is that the first thread to draw a given record's index
       * might end up being the deallocating thread. */
      RAM_FAIL_TRAP(rammtx_wait(&info->ramtestar_mtx));
      /* if there's no pointer stored in
       * *info->ramtestar_desc.ramtestad_ptr*, i'll assume i'm the
       * allocating thread. otherwise, i need to deallocate. */
      if (NULL == info->ramtestar_desc.ramtestad_ptr)
      {
         info->ramtestar_desc = cached;
         /* i signal to the next loop iteration that i'll need a new
          * allocation. */
         cachedflag = 0;
      }
      else
         condemned = info->ramtestar_desc;
      RAM_FAIL_PANIC(rammtx_quit(&info->ramtestar_mtx));
      /* if i have a condemned pointer, i need to deallocate it. */
      if (condemned.ramtestad_ptr != NULL)
      {
         RAM_FAIL_TRAP(ramtest_dealloc(&condemned, test_arg,
               threadidx_arg));
         condemned.ramtestad_ptr = NULL;
      }

      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_check(
            test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));
   }

   /* *ramtestp_flush* is optional (see ramtest_inittest2()), so i skip it
    * if the caller didn't provide one. */
   if (NULL != test_arg->ramtestt_params.ramtestp_flush)
   {
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_flush(
            test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));
   }
   RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_check(
         test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));

   return RAM_REPLY_OK;
}
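/* a worked trace of the handshake in ramtest_thread2(): suppose record 7
 * occurs at sequence positions 12 and 30 (hypothetical positions). the
 * thread that draws position 12 finds *ramtestad_ptr* NULL, publishes its
 * cached allocation under the record mutex, and flags itself to prepare a
 * fresh allocation on the next iteration. the thread that draws position
 * 30 finds the pointer non-NULL, copies the descriptor out as
 * *condemned*, and releases the memory outside the mutex. since each
 * record index appears exactly twice in the sequence, no record is ever
 * visited a third time. */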
ram_reply_t ramtest_describe(FILE *out_arg, const ramtest_params_t *params_arg)
{
   size_t unused = 0;

   RAM_FAIL_NOTNULL(out_arg);
   RAM_FAIL_NOTNULL(params_arg);

   if (params_arg->ramtestp_dryrun)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "you have specified the following test:\n\n"));
   }
   else
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "i will run the following test:\n\n"));
   }
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
         "%zu allocation(s) (and corresponding deallocations).\n",
         params_arg->ramtestp_alloccount));
   if (1 == params_arg->ramtestp_threadcount)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "this test will not be parallelized.\n"));
   }
   else
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "%zu parallel operation(s) allowed.\n",
            params_arg->ramtestp_threadcount));
   }
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
         "%d%% of the allocations will be managed by malloc() "
         "and free().\n", params_arg->ramtestp_mallocchance));
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
         "allocations will not be smaller than %zu bytes.\n",
         params_arg->ramtestp_minsize));
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
         "allocations will not be larger than %zu bytes.\n",
         params_arg->ramtestp_maxsize));
   if (params_arg->ramtestp_userngseed)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "the random number generator will use seed %u.\n",
            params_arg->ramtestp_rngseed));
   }
   else
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "the random number generator will use a randomly "
            "selected seed.\n"));
   }
#if RAM_WANT_OVERCONFIDENT
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
         "warning: this is an overconfident build, so the results cannot "
         "be trusted. rebuild with RAMOPT_UNSUPPORTED_OVERCONFIDENT "
         "#define'd as 0 if you wish to have reliable results.\n"));
#endif
   if (params_arg->ramtestp_dryrun)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg,
            "\nto run this test, omit the --dry-run option.\n"));
   }
   else
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, out_arg, "-----\n"));

   return RAM_REPLY_OK;
}
ram_reply_t ramtest_inittest2(ramtest_test_t *test_arg,
      const ramtest_params_t *params_arg)
{
   size_t i = 0;
   size_t seqlen = 0;
   size_t maxthreads = 0;
   size_t unused = 0;

   RAM_FAIL_NOTNULL(params_arg);
   RAM_FAIL_NOTZERO(params_arg->ramtestp_alloccount);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL, params_arg->ramtestp_minsize > 0);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_minsize <= params_arg->ramtestp_maxsize);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_mallocchance >= 0);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_mallocchance <= 100);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_acquire);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_release);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_query);
   /* *params_arg->ramtestp_flush* is allowed to be NULL. */
   RAM_FAIL_NOTNULL(params_arg->ramtestp_check);

   RAM_FAIL_TRAP(ramtest_maxthreadcount(&maxthreads));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED,
         params_arg->ramtestp_threadcount <= maxthreads);

   test_arg->ramtestt_params = *params_arg;
   if (0 == test_arg->ramtestt_params.ramtestp_threadcount)
   {
      RAM_FAIL_TRAP(ramtest_defaultthreadcount(
            &test_arg->ramtestt_params.ramtestp_threadcount));
   }
   test_arg->ramtestt_records =
         calloc(test_arg->ramtestt_params.ramtestp_alloccount,
         sizeof(*test_arg->ramtestt_records));
   RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL,
         NULL != test_arg->ramtestt_records);
   test_arg->ramtestt_threads =
         calloc(test_arg->ramtestt_params.ramtestp_threadcount,
         sizeof(*test_arg->ramtestt_threads));
   RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL,
         NULL != test_arg->ramtestt_threads);
   seqlen = test_arg->ramtestt_params.ramtestp_alloccount * 2;
   test_arg->ramtestt_sequence = calloc(seqlen,
         sizeof(*test_arg->ramtestt_sequence));
   RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL,
         NULL != test_arg->ramtestt_sequence);

   RAM_FAIL_TRAP(rammtx_mkmutex(&test_arg->ramtestt_mtx));
   for (i = 0; i < test_arg->ramtestt_params.ramtestp_alloccount; ++i)
   {
      RAM_FAIL_TRAP(rammtx_mkmutex(
            &test_arg->ramtestt_records[i].ramtestar_mtx));
   }

   /* the sequence array must contain two copies of each index into
    * *test_arg->ramtestt_records*. the first represents an allocation.
    * the second, a deallocation. */
   for (i = 0; i < seqlen; ++i)
      test_arg->ramtestt_sequence[i] = (i / 2);
   /* i need to seed the random number generator before shuffling, since
    * the shuffle consumes rand(); otherwise, the seed could not reproduce
    * the order of operations. */
   if (!test_arg->ramtestt_params.ramtestp_userngseed)
      test_arg->ramtestt_params.ramtestp_rngseed = (unsigned int)time(NULL);
   srand(test_arg->ramtestt_params.ramtestp_rngseed);
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
         "[0] i seeded the random generator with the value %u.\n",
         test_arg->ramtestt_params.ramtestp_rngseed));
   /* i shuffle the sequence array to ensure a randomized order of
    * operations. */
   RAM_FAIL_TRAP(ramtest_shuffle(test_arg->ramtestt_sequence,
         sizeof(test_arg->ramtestt_sequence[0]), seqlen));

   test_arg->ramtestt_nextrec = 0;
   return RAM_REPLY_OK;
}
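/* a worked example of the sequence construction above: with
 * *ramtestp_alloccount* == 3, the array is initialized to
 * { 0, 0, 1, 1, 2, 2 } and might shuffle to { 1, 0, 2, 0, 1, 2 }. for
 * each record index, the first occurrence a thread draws becomes the
 * allocation and the second becomes the deallocation. */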