/* blocks the calling thread at *barrier_arg* until as many threads as the
 * barrier's capacity have arrived. the final arrival signals the event
 * instead of waiting on it; excess arrivals are reported as underflow. */
ram_reply_t ramwin_waitonbarrier(ramwin_barrier_t *barrier_arg)
{
   LONG remaining = 0;

   RAM_FAIL_NOTNULL(barrier_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, barrier_arg->ramwinb_capacity > 0);

   remaining = InterlockedDecrement(&barrier_arg->ramwinb_vacancy);
   if (remaining < 0)
   {
      /* more threads arrived than the barrier was sized for; inform the
       * caller of the underflow. */
      return RAM_REPLY_UNDERFLOW;
   }
   if (0 == remaining)
   {
      /* i'm the last one waiting on the barrier, so i set the event to the
       * signaled state rather than wait on it. */
      RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, SetEvent(barrier_arg->ramwinb_event));
      return RAM_REPLY_OK;
   }
   /* otherwise, i block until the final thread signals the event. */
   {
      const DWORD waitrc =
            WaitForSingleObject(barrier_arg->ramwinb_event, INFINITE);
      RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, WAIT_OBJECT_0 == waitrc);
   }
   return RAM_REPLY_OK;
}
ram_reply_t ramwin_rmbarrier(ramwin_barrier_t *barrier_arg) { RAM_FAIL_NOTNULL(barrier_arg); /* i don't allow destruction of the barrier while it's in use. */ RAM_FAIL_EXPECT(RAM_REPLY_UNSUPPORTED, barrier_arg->ramwinb_vacancy == barrier_arg->ramwinb_capacity); RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, CloseHandle(&barrier_arg->ramwinb_event)); return RAM_REPLY_OK; }
/* releases an entire page-aligned reservation back to the operating system
 * via VirtualFree(MEM_RELEASE). */
ram_reply_t ramwin_release(char *pages_arg)
{
   int aligned = 0;

   RAM_FAIL_NOTNULL(pages_arg);
   /* only page-aligned addresses are acceptable here. */
   RAM_FAIL_TRAP(rammem_ispage(&aligned, pages_arg));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, aligned);
   /* MEM_RELEASE requires a size of zero; the whole region is released. */
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, VirtualFree(pages_arg, 0, MEM_RELEASE));
   return RAM_REPLY_OK;
}
/* destroys a POSIX barrier created with ramlin_mkbarrier(): the condition
 * variable is destroyed first, then the mutex. */
ram_reply_t ramlin_rmbarrier(ramlin_barrier_t *barrier_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);
   /* i don't allow destruction of the barrier while it's in use; a full
    * vacancy count means no thread is waiting inside. */
   RAM_FAIL_EXPECT(RAM_REPLY_UNSUPPORTED,
         barrier_arg->ramlinb_vacancy == barrier_arg->ramlinb_capacity);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         0 == pthread_cond_destroy(&barrier_arg->ramlinb_cond));
   RAM_FAIL_TRAP(rammtx_rmmutex(&barrier_arg->ramlinb_mutex));
   return RAM_REPLY_OK;
}
/* decommits a single page of memory (its backing store is returned to the
 * system) while keeping the address range reserved. */
ram_reply_t ramwin_decommit(char *page_arg)
{
   int aligned = 0;

   RAM_FAIL_NOTNULL(page_arg);
   /* a zero page size means ramwin_sysinfo hasn't been initialized yet. */
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, ramwin_sysinfo.dwPageSize != 0);
   RAM_FAIL_TRAP(rammem_ispage(&aligned, page_arg));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, aligned);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         VirtualFree(page_arg, ramwin_sysinfo.dwPageSize, MEM_DECOMMIT));
   return RAM_REPLY_OK;
}
ram_reply_t ramwin_reset(char *page_arg) { int ispage = 0; RAM_FAIL_NOTNULL(page_arg); RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, ramwin_sysinfo.dwPageSize != 0); RAM_FAIL_TRAP(rammem_ispage(&ispage, page_arg)); RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, ispage); RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, VirtualAlloc(page_arg, ramwin_sysinfo.dwPageSize, MEM_RESET, PAGE_NOACCESS) == page_arg); return RAM_REPLY_OK; }
ram_reply_t ramwin_bulkalloc(char **pages_arg) { char *p = NULL; RAM_FAIL_NOTNULL(pages_arg); *pages_arg = NULL; RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, ramwin_sysinfo.dwAllocationGranularity != 0); p = (char *)VirtualAlloc(NULL, ramwin_sysinfo.dwAllocationGranularity, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, p != NULL); *pages_arg = p; return RAM_REPLY_OK; }
/* reserves (without committing) one allocation-granularity-sized block of
 * address space; stores its base in *pages_arg. */
ram_reply_t ramwin_reserve(char **pages_arg)
{
   char *mem = NULL;

   RAM_FAIL_NOTNULL(pages_arg);
   *pages_arg = NULL;
   /* a zero granularity means ramwin_sysinfo hasn't been initialized yet. */
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT,
         ramwin_sysinfo.dwAllocationGranularity != 0);
   /* reserved-but-uncommitted memory gets PAGE_NOACCESS; pages are committed
    * individually later. */
   mem = (char *)VirtualAlloc(NULL, ramwin_sysinfo.dwAllocationGranularity,
         MEM_RESERVE, PAGE_NOACCESS);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, mem != NULL);
   *pages_arg = mem;
   return RAM_REPLY_OK;
}
/* launches one worker thread per configured thread count, handing each a
 * heap-allocated ramtest_start_t that the thread routine consumes. */
ram_reply_t ramtest_start(ramtest_test_t *test_arg)
{
   size_t i = 0;
   size_t unused = 0;

   RAM_FAIL_NOTNULL(test_arg);

   for (i = 0; i < test_arg->ramtestt_params.ramtestp_threadcount; ++i)
   {
      ramtest_start_t *start = NULL;
      ram_reply_t e = RAM_REPLY_INSANE;
      const size_t threadid = i + 1;

      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] starting thread %zu...\n", threadid));
      /* i'm the sole producer of this memory; the thread routine is the sole
       * consumer -- but only once the thread actually starts, so i must
       * reclaim it myself if thread creation fails. */
      /* fix: calloc()'s parameters are (count, size); they were swapped
       * (harmless here, but non-idiomatic). */
      start = (ramtest_start_t *)calloc(1, sizeof(*start));
      RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL, NULL != start);
      start->ramtests_test = test_arg;
      start->ramtests_threadidx = i;
      e = ramthread_mkthread(&test_arg->ramtestt_threads[i],
            &ramtest_thread, start);
      if (RAM_REPLY_OK != e)
      {
         /* BUG FIX: on thread-creation failure, ownership of *start* never
          * transferred to the thread; free it to avoid a leak. */
         free(start);
         return e;
      }
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] started thread %zu.\n", threadid));
   }

   return RAM_REPLY_OK;
}
/* reports the number of processors as cached in ramwin_sysinfo. */
ram_reply_t ramwin_cpucount(size_t *cpucount_arg)
{
   size_t count = 0;

   RAM_FAIL_NOTNULL(cpucount_arg);
   count = (size_t)ramwin_sysinfo.dwNumberOfProcessors;
   /* a zero count means ramwin_sysinfo hasn't been initialized yet. */
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, count != 0);
   *cpucount_arg = count;
   return RAM_REPLY_OK;
}
/* releases a thread-local-storage key previously obtained from the TLS
 * facility; the nil key is rejected. */
ram_reply_t ramwin_rmtlskey(ramwin_tlskey_t key_arg)
{
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, key_arg != RAMWIN_NILTLSKEY);
   return TlsFree(key_arg) ? RAM_REPLY_OK : RAM_REPLY_APIFAIL;
}
/* reports the system page size as cached in ramwin_sysinfo. */
ram_reply_t ramwin_pagesize(size_t *pagesz_arg)
{
   size_t pagesz = 0;

   RAM_FAIL_NOTNULL(pagesz_arg);
   pagesz = (size_t)ramwin_sysinfo.dwPageSize;
   /* a zero page size means ramwin_sysinfo hasn't been initialized yet. */
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, pagesz != 0);
   *pagesz_arg = pagesz;
   return RAM_REPLY_OK;
}
/* reports the system allocation granularity as cached in ramwin_sysinfo. */
ram_reply_t ramwin_mmapgran(size_t *mmapgran_arg)
{
   size_t gran = 0;

   RAM_FAIL_NOTNULL(mmapgran_arg);
   gran = (size_t)ramwin_sysinfo.dwAllocationGranularity;
   /* a zero granularity means ramwin_sysinfo hasn't been initialized yet. */
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, gran != 0);
   *mmapgran_arg = gran;
   return RAM_REPLY_OK;
}
/* waits for *thread_arg* to terminate and recovers its exit code, which is
 * interpreted as a ram_reply_t and stored into *reply_arg. */
ram_reply_t ramwin_jointhread(ram_reply_t *reply_arg, ramwin_thread_t thread_arg)
{
   DWORD exitcode = STILL_ACTIVE;
   DWORD waitrc = WAIT_FAILED;

   RAM_FAIL_NOTNULL(reply_arg);
   *reply_arg = RAM_REPLY_INSANE;
   RAM_FAIL_NOTNULL(thread_arg);
   /* block indefinitely until the thread object becomes signaled. */
   waitrc = WaitForSingleObject(thread_arg, INFINITE);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, WAIT_OBJECT_0 == waitrc);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         GetExitCodeThread(thread_arg, &exitcode));
   /* after a successful wait, STILL_ACTIVE could only mean the thread used
    * that value as its exit code, which i treat as insane. */
   RAM_FAIL_EXPECT(RAM_REPLY_INSANE, exitcode != STILL_ACTIVE);
   *reply_arg = (ram_reply_t)exitcode;
   /* NOTE(review): the thread handle is not closed here; presumably the
    * caller retains ownership -- confirm, otherwise handles leak. */
   return RAM_REPLY_OK;
}
/* returns the allocation described by *ptrdesc_arg, first verifying its fill
 * pattern (unless disabled) and cross-checking the owning pool and size
 * reported by the allocator's query callback. allocations the query doesn't
 * recognize are assumed to have come from malloc() and are free()d. */
ram_reply_t ramtest_dealloc(ramtest_allocdesc_t *ptrdesc_arg,
      ramtest_test_t *test_arg, size_t threadidx_arg)
{
   void *pool = NULL;
   size_t sz = 0;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(ptrdesc_arg);
   RAM_FAIL_NOTNULL(ptrdesc_arg->ramtestad_ptr);
   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   if (!test_arg->ramtestt_params.ramtestp_nofill)
   {
      /* verify the block still holds the pattern written at allocation. */
      RAM_FAIL_TRAP(ramtest_chkfill(ptrdesc_arg->ramtestad_ptr,
            ptrdesc_arg->ramtestad_sz));
   }

   e = test_arg->ramtestt_params.ramtestp_query(&pool, &sz,
         ptrdesc_arg->ramtestad_ptr, test_arg->ramtestt_params.ramtestp_extra);
   if (RAM_REPLY_OK == e)
   {
      RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT,
            ptrdesc_arg->ramtestad_pool == pool);
      /* the size won't always be identical due to the nature of mux pools.
       * the size will never be smaller, though. */
      RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, sz >= ptrdesc_arg->ramtestad_sz);
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_release(ptrdesc_arg));
   }
   else if (RAM_REPLY_NOTFOUND == e)
   {
      /* a block unknown to the pool must have come from malloc(). */
      RAM_FAIL_EXPECT(RAM_REPLY_INSANE, NULL == ptrdesc_arg->ramtestad_pool);
      free(ptrdesc_arg->ramtestad_ptr);
   }
   else
   {
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   }

   return RAM_REPLY_OK;
}
/* stores *value_arg* into the thread-local slot identified by *key_arg*;
 * NULL values and the nil key are rejected. */
ram_reply_t ramwin_stotls(ramwin_tlskey_t key_arg, void *value_arg)
{
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, key_arg != RAMWIN_NILTLSKEY);
   RAM_FAIL_NOTNULL(value_arg);
   return TlsSetValue(key_arg, value_arg) ? RAM_REPLY_OK : RAM_REPLY_APIFAIL;
}
/* initializes *barrier_arg* for *capacity_arg* threads; the backing Win32
 * event starts unsignaled and is signaled once by the final arriving thread
 * (see ramwin_waitonbarrier()). */
ram_reply_t ramwin_mkbarrier(ramwin_barrier_t *barrier_arg, size_t capacity_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);
   /* consistency fix: reject a zero capacity up front, matching
    * ramlin_mkbarrier(); a zero-capacity barrier could never be waited on. */
   RAM_FAIL_NOTZERO(capacity_arg);
   memset(barrier_arg, 0, sizeof(*barrier_arg));
   RAM_FAIL_TRAP(ram_cast_sizetolong(&barrier_arg->ramwinb_capacity,
         capacity_arg));
   barrier_arg->ramwinb_vacancy = barrier_arg->ramwinb_capacity;
   /* BUG FIX: the event must be manual-reset (second argument TRUE).
    * ramwin_waitonbarrier() releases all waiters with a single SetEvent();
    * an auto-reset event (FALSE, as before) wakes exactly one waiter and
    * leaves the rest blocked forever whenever capacity exceeds two. */
   barrier_arg->ramwinb_event = CreateEvent(NULL, TRUE, FALSE, NULL);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, NULL != barrier_arg->ramwinb_event);
   return RAM_REPLY_OK;
}
/* per-element callback for walking a pool's availability list: checks the
 * list link itself, then verifies the enclosing node belongs to the pool
 * named in the context. */
ram_reply_t ramvec_chkavail(ramlist_list_t *list_arg, void *context_arg)
{
   const ramvec_chkcontext_t *ctx = (ramvec_chkcontext_t *)context_arg;
   const ramvec_node_t *node = NULL;

   RAM_FAIL_NOTNULL(list_arg);
   assert(context_arg != NULL);
   RAM_FAIL_TRAP(ramlist_chklist(list_arg));
   /* recover the node structure that embeds this availability link. */
   node = RAM_CAST_STRUCTBASE(ramvec_node_t, ramvecn_avail, list_arg);
   /* every node on the list must point back at the pool being checked. */
   RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, ctx->ramveccc_pool == node->ramvecn_vpool);
   /* RAM_REPLY_AGAIN presumably tells the list walker to continue. */
   return RAM_REPLY_AGAIN;
}
/* blocks the calling thread at *barrier_arg* until the barrier's full
 * capacity of threads has arrived. uses a cycle counter so the barrier is
 * reusable: waiters poll until the counter advances, which guards against
 * spurious wakeups from pthread_cond_wait().
 * NOTE(review): pthread_cond_wait() requires the associated mutex to be
 * locked, so the caller presumably holds ramlinb_mutex on entry -- the TODO
 * below suggests the author wanted to assert that; confirm against callers. */
ram_reply_t ramlin_waitonbarrier2(ramlin_barrier_t *barrier_arg)
{
   uintptr_t cycle = 0;

   assert(barrier_arg != NULL);
   /* TODO: is it possible to test whether the mutex is locked? */
   /* snapshot the cycle counter before decrementing vacancy; the comparison
    * below detects when the final thread advances it. */
   cycle = barrier_arg->ramlinb_cycle;
   /* am i the final thread to wait at the barrier? */
   if (0 == --barrier_arg->ramlinb_vacancy)
   {
      /* i increment the cycle number to signal to the other threads that
       * they should stop polling pthread_cond_wait() and return. */
      ++barrier_arg->ramlinb_cycle;
      /* this is my opportunity to reset the vacancy counter without
       * the possibility of introducing a race condition. */
      barrier_arg->ramlinb_vacancy = barrier_arg->ramlinb_capacity;
      RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
            0 == pthread_cond_broadcast(&barrier_arg->ramlinb_cond));
      return RAM_REPLY_OK;
   }
   else
   {
      /* i am not the final thread. i poll pthread_cond_wait(), waiting
       * for the cycle counter to increment, signaling that the final thread
       * has reached the barrier. */
      while (cycle == barrier_arg->ramlinb_cycle)
      {
         RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
               0 == pthread_cond_wait(&barrier_arg->ramlinb_cond,
                     &barrier_arg->ramlinb_mutex));
      }
      return RAM_REPLY_OK;
   }
}
/* initializes the POSIX barrier *barrier_arg* for *capacity_arg* threads;
 * sets up its mutex and condition variable. */
ram_reply_t ramlin_mkbarrier(ramlin_barrier_t *barrier_arg, size_t capacity_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);
   RAM_FAIL_NOTZERO(capacity_arg);
   /* the barrier starts a fresh cycle with every slot vacant. */
   barrier_arg->ramlinb_capacity = capacity_arg;
   barrier_arg->ramlinb_vacancy = capacity_arg;
   barrier_arg->ramlinb_cycle = 0;
   RAM_FAIL_TRAP(ramuix_mkmutex(&barrier_arg->ramlinb_mutex));
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         0 == pthread_cond_init(&barrier_arg->ramlinb_cond, NULL));
   return RAM_REPLY_OK;
}
/* extracts the basename (filename plus extension) of *pathn_arg* into
 * *dest_arg*, which holds at least *len_arg* characters. */
ram_reply_t ramwin_basename2(char *dest_arg, size_t len_arg,
      const char *pathn_arg)
{
   char drive[_MAX_DRIVE];
   char dir[_MAX_DIR];
   char fname[_MAX_FNAME];
   char ext[_MAX_EXT];
   errno_t splitrc = -1;
   int written = -1;

   RAM_FAIL_NOTNULL(dest_arg);
   RAM_FAIL_NOTZERO(len_arg);
   RAM_FAIL_NOTNULL(pathn_arg);
   /* first, split the path into its four components. */
   splitrc = _splitpath_s(pathn_arg, drive, _MAX_DRIVE, dir, _MAX_DIR,
         fname, _MAX_FNAME, ext, _MAX_EXT);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, 0 == splitrc);
   /* then reassemble filename and extension into the basename. */
   written = _snprintf_s(dest_arg, len_arg, _MAX_FNAME + _MAX_EXT, "%s%s",
         fname, ext);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, -1 < written);
   return RAM_REPLY_OK;
}
/* formatted output to *file_arg* through trio_vfprintf(); the number of
 * characters written is reported via *count_arg. */
ram_reply_t ramtest_vfprintf(size_t *count_arg, FILE *file_arg,
      const char *fmt_arg, va_list moar_arg)
{
   int written = -1;

   RAM_FAIL_NOTNULL(count_arg);
   *count_arg = 0;
   RAM_FAIL_NOTNULL(file_arg);
   RAM_FAIL_NOTNULL(fmt_arg);
   written = trio_vfprintf(file_arg, fmt_arg, moar_arg);
   /* a negative count from the formatter indicates failure. */
   RAM_FAIL_EXPECT(RAM_REPLY_CRTFAIL, 0 <= written);
   RAM_FAIL_TRAP(ram_cast_inttosize(count_arg, written));
   return RAM_REPLY_OK;
}
/* produces a pseudo-random uint32_t in the half-open range
 * [n0_arg, n1_arg) by scaling rand()'s output. */
ram_reply_t ramtest_randuint32(uint32_t *result_arg, uint32_t n0_arg,
      uint32_t n1_arg)
{
   uint32_t scaled = 0;

   RAM_FAIL_NOTNULL(result_arg);
   /* the range must be non-empty. */
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, n0_arg < n1_arg);
   /* sanity-check the scaling formula at both extremes of rand()'s range. */
   assert(RAMTEST_SCALERAND(uint32_t, 0, n0_arg, n1_arg) >= n0_arg);
   assert(RAMTEST_SCALERAND(uint32_t, RAND_MAX, n0_arg, n1_arg) < n1_arg);
   scaled = RAMTEST_SCALERAND(uint32_t, rand(), n0_arg, n1_arg);
   assert(scaled >= n0_arg);
   assert(scaled < n1_arg);
   *result_arg = scaled;
   return RAM_REPLY_OK;
}
/* starts a new thread running *main_arg* with argument *arg_arg*; the new
 * thread handle is stored in *thread_arg. */
ram_reply_t ramwin_mkthread(ramwin_thread_t *thread_arg,
      ramsys_threadmain_t main_arg, void *arg_arg)
{
   HANDLE newthread = NULL;

   RAM_FAIL_NOTNULL(thread_arg);
   *thread_arg = NULL;
   RAM_FAIL_NOTNULL(main_arg);
   /* TODO: casting 'main_arg' to LPTHREAD_START_ROUTINE simplifies the code
    * but it sacrifices type safety. if i end up needing a more sophisticated
    * threading interface, i'll consider changing this. */
   newthread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)main_arg,
         arg_arg, 0, NULL);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, NULL != newthread);
   *thread_arg = newthread;
   return RAM_REPLY_OK;
}
/* per-element callback for walking a pool's inventory list: checks the list
 * link, verifies pool membership, and delegates to an optional node-level
 * check supplied through the context. */
ram_reply_t ramvec_chkinv(ramlist_list_t *list_arg, void *context_arg)
{
   const ramvec_chkcontext_t *ctx = (ramvec_chkcontext_t *)context_arg;
   const ramvec_node_t *node = NULL;

   RAM_FAIL_NOTNULL(list_arg);
   assert(context_arg != NULL);
   RAM_FAIL_TRAP(ramlist_chklist(list_arg));
   /* recover the node structure that embeds this inventory link. */
   node = RAM_CAST_STRUCTBASE(ramvec_node_t, ramvecn_inv, list_arg);
   /* every node on the list must point back at the pool being checked. */
   RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, ctx->ramveccc_pool == node->ramvecn_vpool);
   /* if additional checking was specified, pass control to that function
    * with its associated context. */
   if (ctx->ramveccc_chknode)
      RAM_FAIL_TRAP(ctx->ramveccc_chknode(node));
   return RAM_REPLY_AGAIN;
}
/* acquires a new allocation of random size within the configured bounds,
 * sometimes (per ramtestp_mallocchance) from malloc() instead of the pool
 * under test, and optionally fills it with a test pattern. */
ram_reply_t ramtest_alloc(ramtest_allocdesc_t *newptr_arg,
      ramtest_test_t *test_arg, size_t threadidx_arg)
{
   int32_t roll = 0;
   ramtest_allocdesc_t desc = {0};
   uint32_t n = 0;

   RAM_FAIL_NOTNULL(newptr_arg);
   memset(newptr_arg, 0, sizeof(*newptr_arg));
   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   /* pick a size uniformly from [minsize, maxsize]. */
   RAM_FAIL_TRAP(ramtest_randuint32(&n,
         test_arg->ramtestt_params.ramtestp_minsize,
         test_arg->ramtestt_params.ramtestp_maxsize + 1));
   desc.ramtestad_sz = n;
   /* i want a certain percentage of allocations to be performed by
    * an alternate allocator. */
   RAM_FAIL_TRAP(ramtest_randint32(&roll, 0, 100));
   /* splint reports a problem in the next line regarding the difference
    * in type between the two integers being compared. i don't understand
    * why it's necessary to consider int32_t and int separate types and i
    * can't find any information about 16-bit programming platforms, so
    * i'm going to suppress it. */
   if (/*@t1@*/roll < test_arg->ramtestt_params.ramtestp_mallocchance)
   {
      desc.ramtestad_pool = NULL;
      desc.ramtestad_ptr = malloc(desc.ramtestad_sz);
      /* BUG FIX: malloc() can fail; previously a NULL result flowed into
       * ramtest_fill() and the returned descriptor unchecked. */
      RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL, NULL != desc.ramtestad_ptr);
   }
   else
   {
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_acquire(&desc,
            desc.ramtestad_sz, test_arg->ramtestt_params.ramtestp_extra,
            threadidx_arg));
   }
   if (!test_arg->ramtestt_params.ramtestp_nofill)
      RAM_FAIL_TRAP(ramtest_fill(desc.ramtestad_ptr, desc.ramtestad_sz));

   *newptr_arg = desc;
   return RAM_REPLY_OK;
}
/* recalls the value stored in the thread-local slot identified by *key_arg*
 * into *value_arg. */
ram_reply_t ramwin_rcltls(void **value_arg, ramwin_tlskey_t key_arg)
{
   void *stored = NULL;

   RAM_FAIL_NOTNULL(value_arg);
   *value_arg = NULL;
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, key_arg != RAMWIN_NILTLSKEY);
   stored = TlsGetValue(key_arg);
   /* NULL is an ambiguous return value: it means either "nothing stored" or
    * "failure". GetLastError() disambiguates the two. */
   /* TODO: TlsGetValue() doesn't check whether key_arg is valid, so i'd need
    * to implement this check (or ensure its validity) myself. */
   if (NULL == stored && ERROR_SUCCESS != GetLastError())
      return RAM_REPLY_APIFAIL;
   *value_arg = stored;
   return RAM_REPLY_OK;
}
/* the body of one test worker thread: repeatedly draws indices from the
 * shared shuffled sequence; the first thread to reach a record stores an
 * allocation into it, the second deallocates it. allocations are prepared
 * outside the record mutex to minimize contention. */
ram_reply_t ramtest_thread2(ramtest_test_t *test_arg, size_t threadidx_arg)
{
   size_t i = 0;
   ram_reply_t e = RAM_REPLY_INSANE;
   int cachedflag = 0;
   ramtest_allocdesc_t cached = {0};

   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   while ((RAM_REPLY_OK == (e = ramtest_next(&i, test_arg)))
         && i < test_arg->ramtestt_params.ramtestp_alloccount)
   {
      ramtest_allocrec_t *info = NULL;
      ramtest_allocdesc_t condemned = {0};

      info = &test_arg->ramtestt_records[test_arg->ramtestt_sequence[i]];
      /* i don't want to allocate while i'm holding the allocation record
       * mutex, so i'll prepare an allocation ahead of time. */
      if (!cachedflag)
      {
         RAM_FAIL_TRAP(ramtest_alloc(&cached, test_arg, threadidx_arg));
         /* BUG FIX: the flag was never raised after allocating, so every
          * iteration acquired a fresh block and any unconsumed *cached*
          * allocation leaked when it was overwritten. */
         cachedflag = 1;
      }
      /* there's actually a race condition between the call to
       * *ramtest_next()* and this point. the worst that could happen
       * (i think) is that the first thread to draw a given record's index
       * might end up being the deallocating thread. */
      RAM_FAIL_TRAP(rammtx_wait(&info->ramtestar_mtx));
      /* if there's a pointer stored in *info->ramtestar_desc.ramtestad_ptr*
       * we'll assume we're the allocating thread. otherwise, we need to
       * deallocate. */
      if (NULL == info->ramtestar_desc.ramtestad_ptr)
      {
         info->ramtestar_desc = cached;
         /* i signal to the next loop iteration that i'll need a new
          * allocation. */
         cachedflag = 0;
      }
      else
         condemned = info->ramtestar_desc;
      RAM_FAIL_PANIC(rammtx_quit(&info->ramtestar_mtx));
      /* if i have a condemned pointer, i need to deallocate it. */
      if (condemned.ramtestad_ptr != NULL)
      {
         RAM_FAIL_TRAP(ramtest_dealloc(&condemned, test_arg, threadidx_arg));
         condemned.ramtestad_ptr = NULL;
      }
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_check(
            test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));
   }
   /* BUG FIX: if the loop ends while i'm still holding an unconsumed cached
    * allocation, release it instead of leaking it. */
   if (cachedflag)
   {
      RAM_FAIL_TRAP(ramtest_dealloc(&cached, test_arg, threadidx_arg));
      cachedflag = 0;
   }
   /* NOTE(review): an error from ramtest_next() terminates the loop but is
    * not otherwise reported -- confirm that's intentional. */
   RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_flush(
         test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));
   RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_check(
         test_arg->ramtestt_params.ramtestp_extra, threadidx_arg));
   return RAM_REPLY_OK;
}
/* validates *params_arg*, copies it into *test_arg*, and allocates and
 * initializes the test's record, thread, and sequence arrays, mutexes, and
 * random-number seed. */
ram_reply_t ramtest_inittest2(ramtest_test_t *test_arg,
      const ramtest_params_t *params_arg)
{
   size_t i = 0;
   size_t seqlen = 0;
   size_t maxthreads = 0;
   size_t unused = 0;

   /* BUG FIX: *test_arg* was dereferenced without being checked; only
    * *params_arg* was validated. */
   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_NOTNULL(params_arg);
   RAM_FAIL_NOTZERO(params_arg->ramtestp_alloccount);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL, params_arg->ramtestp_minsize > 0);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_minsize <= params_arg->ramtestp_maxsize);
   /* the malloc chance is a percentage and must lie in [0, 100]. */
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_mallocchance >= 0);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         params_arg->ramtestp_mallocchance <= 100);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_acquire);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_release);
   RAM_FAIL_NOTNULL(params_arg->ramtestp_query);
   /* *params_arg->ramtestp_flush* is allowed to be NULL. */
   RAM_FAIL_NOTNULL(params_arg->ramtestp_check);
   RAM_FAIL_TRAP(ramtest_maxthreadcount(&maxthreads));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED,
         params_arg->ramtestp_threadcount <= maxthreads);

   test_arg->ramtestt_params = *params_arg;
   if (0 == test_arg->ramtestt_params.ramtestp_threadcount)
   {
      RAM_FAIL_TRAP(ramtest_defaultthreadcount(
            &test_arg->ramtestt_params.ramtestp_threadcount));
   }
   test_arg->ramtestt_records =
         calloc(test_arg->ramtestt_params.ramtestp_alloccount,
               sizeof(*test_arg->ramtestt_records));
   RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL,
         NULL != test_arg->ramtestt_records);
   test_arg->ramtestt_threads =
         calloc(test_arg->ramtestt_params.ramtestp_threadcount,
               sizeof(*test_arg->ramtestt_threads));
   if (NULL == test_arg->ramtestt_threads)
   {
      /* BUG FIX: earlier allocations were leaked when a later one failed. */
      free(test_arg->ramtestt_records);
      test_arg->ramtestt_records = NULL;
      return RAM_REPLY_RESOURCEFAIL;
   }
   seqlen = test_arg->ramtestt_params.ramtestp_alloccount * 2;
   test_arg->ramtestt_sequence =
         calloc(seqlen, sizeof(*test_arg->ramtestt_sequence));
   if (NULL == test_arg->ramtestt_sequence)
   {
      /* BUG FIX: release both earlier allocations on failure. */
      free(test_arg->ramtestt_threads);
      test_arg->ramtestt_threads = NULL;
      free(test_arg->ramtestt_records);
      test_arg->ramtestt_records = NULL;
      return RAM_REPLY_RESOURCEFAIL;
   }
   RAM_FAIL_TRAP(rammtx_mkmutex(&test_arg->ramtestt_mtx));
   for (i = 0; i < test_arg->ramtestt_params.ramtestp_alloccount; ++i)
   {
      RAM_FAIL_TRAP(rammtx_mkmutex(
            &test_arg->ramtestt_records[i].ramtestar_mtx));
   }
   /* the sequence array must contain two copies of each index into
    * *test_arg->ramtestt_records*. the first represents an allocation.
    * the second, a deallocation. */
   for (i = 0; i < seqlen; ++i)
      test_arg->ramtestt_sequence[i] = (i / 2);
   /* i shuffle the sequence array to ensure a randomized order of
    * operations. */
   RAM_FAIL_TRAP(ramtest_shuffle(test_arg->ramtestt_sequence,
         sizeof(test_arg->ramtestt_sequence[0]), seqlen));
   if (!test_arg->ramtestt_params.ramtestp_userngseed)
      test_arg->ramtestt_params.ramtestp_rngseed = (unsigned int)time(NULL);
   srand(test_arg->ramtestt_params.ramtestp_rngseed);
   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
         "[0] i seeded the random generator with the value %u.\n",
         test_arg->ramtestt_params.ramtestp_rngseed));
   test_arg->ramtestt_nextrec = 0;
   return RAM_REPLY_OK;
}