ram_reply_t rammux_acquire(void **newptr_arg, rammux_pool_t *mpool_arg,
      size_t size_arg)
{
   ramalgn_pool_t *apool = NULL;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(newptr_arg);
   *newptr_arg = NULL;
   RAM_FAIL_NOTNULL(mpool_arg);
   RAM_FAIL_NOTZERO(size_arg);

   e = rammux_getalgnpool(&apool, size_arg, mpool_arg);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
      /* i shouldn't ever get here. */
      return RAM_REPLY_INSANE;
   case RAM_REPLY_RANGEFAIL:
      return e;
   case RAM_REPLY_OK:
      break;
   }

   RAM_FAIL_TRAP(ramalgn_acquire(newptr_arg, apool));
   return RAM_REPLY_OK;
}

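/* a minimal usage sketch (hypothetical caller, not part of the library): it
 * assumes 'mpool_arg' was already initialized elsewhere and falls back to
 * malloc() when rammux_acquire() reports RAM_REPLY_RANGEFAIL, i.e. when the
 * requested size is outside the range the mux pool can service. */
ram_reply_t example_acquire(void **newptr_arg, rammux_pool_t *mpool_arg,
      size_t size_arg)
{
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(newptr_arg);
   *newptr_arg = NULL;

   e = rammux_acquire(newptr_arg, mpool_arg, size_arg);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   case RAM_REPLY_RANGEFAIL:
      /* the size isn't serviceable by the mux pool; defer to malloc(). */
      *newptr_arg = malloc(size_arg);
      RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL, NULL != *newptr_arg);
      return RAM_REPLY_OK;
   case RAM_REPLY_OK:
      return RAM_REPLY_OK;
   }
}
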
ram_reply_t ramtest_start(ramtest_test_t *test_arg)
{
   size_t i = 0;
   size_t unused = 0;

   RAM_FAIL_NOTNULL(test_arg);

   for (i = 0; i < test_arg->ramtestt_params.ramtestp_threadcount; ++i)
   {
      ramtest_start_t *start = NULL;
      const size_t threadid = i + 1;

      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] starting thread %zu...\n", threadid));
      /* i'm the sole producer of this memory; *ramtest_thread()* is the sole
       * consumer. */
      start = (ramtest_start_t *)calloc(sizeof(*start), 1);
      RAM_FAIL_EXPECT(RAM_REPLY_RESOURCEFAIL, NULL != start);
      start->ramtests_test = test_arg;
      start->ramtests_threadidx = i;
      RAM_FAIL_TRAP(ramthread_mkthread(&test_arg->ramtestt_threads[i],
            &ramtest_thread, start));
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] started thread %zu.\n", threadid));
   }

   return RAM_REPLY_OK;
}

ram_reply_t main2(int argc, char *argv[])
{
   ramtest_params_t testparams;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_TRAP(ram_initialize(NULL, NULL));
   RAM_FAIL_TRAP(initdefaults(&testparams));

   e = parseargs(&testparams, argc, argv);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
   case RAM_REPLY_OK:
      break;
   case RAM_REPLY_INPUTFAIL:
      return e;
   }

   e = runtest(&testparams);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
   case RAM_REPLY_OK:
      break;
   case RAM_REPLY_INPUTFAIL:
      return e;
   }

   return RAM_REPLY_OK;
}

ram_reply_t ramtra_pop(void **ptr_arg, ramtra_trash_t *trash_arg)
{
   void *p = NULL;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(ptr_arg);
   *ptr_arg = NULL;
   RAM_FAIL_NOTNULL(trash_arg);

   RAM_FAIL_TRAP(rammtx_wait(&trash_arg->ramtrat_mutex));
   p = RAMSLST_NEXT(&trash_arg->ramtrat_items);
   e = ramslst_remove(&trash_arg->ramtrat_items);
   trash_arg->ramtrat_size -= (RAM_REPLY_OK == e);
   /* if i fail to quit the mutex, the process can't continue meaningfully. */
   RAM_FAIL_PANIC(rammtx_quit(&trash_arg->ramtrat_mutex));

   if (RAM_REPLY_OK == e || RAM_REPLY_NOTFOUND == e)
   {
      *ptr_arg = p;
      return e;
   }
   else
   {
      RAM_FAIL_TRAP(e);
      /* i shouldn't be able to get here, since 'e' is known to not be
       * RAM_REPLY_OK. */
      return RAM_REPLY_INSANE;
   }
}

ram_reply_t ramtest_join(ramtest_test_t *test_arg)
{
   size_t i = 0;
   ram_reply_t myreply = RAM_REPLY_OK;
   size_t unused = 0;

   RAM_FAIL_NOTNULL(test_arg);

   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
         "[0] i am waiting for my threads to finish...\n"));
   for (i = 0; i < test_arg->ramtestt_params.ramtestp_threadcount; ++i)
   {
      ram_reply_t e = RAM_REPLY_INSANE;

      RAM_FAIL_TRAP(ramthread_join(&e, test_arg->ramtestt_threads[i]));
      if (RAM_REPLY_OK != e)
      {
         RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
               "[0] thread %zu replied with an unexpected failure (%d).\n",
               i + 1, (int)e));
         /* if i haven't yet recorded an error as my reply, do so now. this
          * ensures that the primary symptom is recorded and not any echoes
          * of the problem. */
         RAM_FAIL_TRAP(ram_fail_accumulate(&myreply, e));
      }
   }

   return myreply;
}

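/* a minimal sketch (hypothetical, for illustration only) of the accumulation
 * policy described in ramtest_join() above. it assumes ram_fail_accumulate()
 * keeps the first non-OK reply it is given and itself returns RAM_REPLY_OK
 * unless its own arguments are bad, which is how ramtest_join() and
 * ramtest_test() appear to use it. */
ram_reply_t example_accumulate(ram_reply_t *reply_arg)
{
   RAM_FAIL_NOTNULL(reply_arg);
   *reply_arg = RAM_REPLY_OK;

   /* the first failure recorded becomes the reply; later failures are
    * treated as echoes and don't overwrite it. */
   RAM_FAIL_TRAP(ram_fail_accumulate(reply_arg, RAM_REPLY_OK));
   RAM_FAIL_TRAP(ram_fail_accumulate(reply_arg, RAM_REPLY_APIFAIL));
   RAM_FAIL_TRAP(ram_fail_accumulate(reply_arg, RAM_REPLY_INSANE));
   /* '*reply_arg' is now expected to hold RAM_REPLY_APIFAIL. */

   return RAM_REPLY_OK;
}
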
ram_reply_t ramtra_mktrash2(ramtra_trash_t *trash_arg)
{
   assert(trash_arg != NULL);

   RAM_FAIL_TRAP(rammtx_mkmutex(&trash_arg->ramtrat_mutex));
   RAM_FAIL_TRAP(ramslst_mklist(&trash_arg->ramtrat_items));
   trash_arg->ramtrat_size = 0;

   return RAM_REPLY_OK;
}

ram_reply_t ramtest_test2(ramtest_test_t *test_arg)
{
   size_t unused = 0;

   RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr, "[0] beginning test...\n"));
   RAM_FAIL_TRAP(ramtest_start(test_arg));
   RAM_FAIL_TRAP(ramtest_join(test_arg));

   return RAM_REPLY_OK;
}

ram_reply_t ramvec_mkpool2(ramvec_pool_t *pool_arg, size_t nodecap_arg,
      ramvec_mknode_t mknode_arg)
{
   assert(pool_arg != NULL);
   RAM_FAIL_NOTZERO(nodecap_arg);
   RAM_FAIL_NOTNULL(mknode_arg);

   RAM_FAIL_TRAP(ramlist_mklist(&pool_arg->ramvecvp_inv));
   RAM_FAIL_TRAP(ramlist_mklist(&pool_arg->ramvecvp_avail));
   pool_arg->ramvecvp_nodecapacity = nodecap_arg;
   pool_arg->ramvecvp_mknode = mknode_arg;

   return RAM_REPLY_OK;
}

ram_reply_t ramtra_push(ramtra_trash_t *trash_arg, void *ptr_arg)
{
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(trash_arg);
   RAM_FAIL_NOTNULL(ptr_arg);

   RAM_FAIL_TRAP(rammtx_wait(&trash_arg->ramtrat_mutex));
   e = ramslst_insert((ramslst_slist_t *)ptr_arg, &trash_arg->ramtrat_items);
   trash_arg->ramtrat_size += (RAM_REPLY_OK == e);
   /* if i fail to quit the mutex, the process can't continue meaningfully. */
   RAM_FAIL_PANIC(rammtx_quit(&trash_arg->ramtrat_mutex));
   RAM_FAIL_TRAP(e);

   return RAM_REPLY_OK;
}

ram_reply_t ramvec_initnode(ramvec_node_t *node_arg, ramvec_pool_t *pool_arg)
{
#ifndef NDEBUG
   int hastail = 0;
#endif

   assert(node_arg != NULL);
   assert(pool_arg != NULL);
   /* the pool must be empty if a new node is to be initialized. */
   assert(RAM_REPLY_OK == ramlist_hastail(&hastail, &pool_arg->ramvecvp_avail)
         && !hastail);

   node_arg->ramvecn_vpool = pool_arg;
   RAM_FAIL_TRAP(ramlist_mklist(&node_arg->ramvecn_inv));
   RAM_FAIL_TRAP(ramlist_mklist(&node_arg->ramvecn_avail));

   return RAM_REPLY_OK;
}

ram_reply_t release(ramtest_allocdesc_t *desc_arg)
{
   RAM_FAIL_NOTNULL(desc_arg);

   RAM_FAIL_TRAP(ram_default_discard(desc_arg->ramtestad_ptr));

   return RAM_REPLY_OK;
}

ram_reply_t check(void *extra_arg, size_t threadidx_arg)
{
   RAMANNOTATE_UNUSEDARG(extra_arg);
   RAMANNOTATE_UNUSEDARG(threadidx_arg);

   RAM_FAIL_TRAP(ram_default_check());

   return RAM_REPLY_OK;
}

ram_reply_t ramtest_dealloc(ramtest_allocdesc_t *ptrdesc_arg,
      ramtest_test_t *test_arg, size_t threadidx_arg)
{
   void *pool = NULL;
   size_t sz = 0;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(ptrdesc_arg);
   RAM_FAIL_NOTNULL(ptrdesc_arg->ramtestad_ptr);
   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   if (!test_arg->ramtestt_params.ramtestp_nofill)
   {
      RAM_FAIL_TRAP(ramtest_chkfill(ptrdesc_arg->ramtestad_ptr,
            ptrdesc_arg->ramtestad_sz));
   }
   e = test_arg->ramtestt_params.ramtestp_query(&pool, &sz,
         ptrdesc_arg->ramtestad_ptr, test_arg->ramtestt_params.ramtestp_extra);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   case RAM_REPLY_OK:
      RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, ptrdesc_arg->ramtestad_pool == pool);
      /* the size won't always be identical due to the nature of mux pools.
       * the size will never be smaller, though. */
      RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, sz >= ptrdesc_arg->ramtestad_sz);
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_release(ptrdesc_arg));
      break;
   case RAM_REPLY_NOTFOUND:
      RAM_FAIL_EXPECT(RAM_REPLY_INSANE, NULL == ptrdesc_arg->ramtestad_pool);
      free(ptrdesc_arg->ramtestad_ptr);
      break;
   }

   return RAM_REPLY_OK;
}

ram_reply_t ramtra_rmtrash(ramtra_trash_t *trash_arg)
{
   RAM_FAIL_NOTNULL(trash_arg);

   /* TODO: what do i do if the trash isn't empty yet? */
   RAM_FAIL_TRAP(rammtx_rmmutex(&trash_arg->ramtrat_mutex));
   memset(trash_arg, 0, sizeof(*trash_arg));

   return RAM_REPLY_OK;
}

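/* a minimal round-trip sketch (hypothetical helper, not part of the library):
 * push a discarded pointer onto a trash list and pop it back off, using the
 * ramtra routines shown above. it assumes the trashed block is at least
 * sizeof(ramslst_slist_t) bytes, since ramtra_push() reuses the block itself
 * as the intrusive list node. */
ram_reply_t example_trash_roundtrip(void *discarded_arg)
{
   ramtra_trash_t trash;
   void *p = NULL;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(discarded_arg);

   RAM_FAIL_TRAP(ramtra_mktrash2(&trash));
   RAM_FAIL_TRAP(ramtra_push(&trash, discarded_arg));
   e = ramtra_pop(&p, &trash);
   /* RAM_REPLY_NOTFOUND would mean the trash is empty; here i expect to get
    * back the pointer i just pushed. */
   RAM_FAIL_TRAP(e);
   RAM_FAIL_EXPECT(RAM_REPLY_INSANE, p == discarded_arg);
   RAM_FAIL_TRAP(ramtra_rmtrash(&trash));

   return RAM_REPLY_OK;
}
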
ram_reply_t ramtest_alloc(ramtest_allocdesc_t *newptr_arg,
      ramtest_test_t *test_arg, size_t threadidx_arg)
{
   int32_t roll = 0;
   ramtest_allocdesc_t desc = {0};
   uint32_t n = 0;

   RAM_FAIL_NOTNULL(newptr_arg);
   memset(newptr_arg, 0, sizeof(*newptr_arg));
   RAM_FAIL_NOTNULL(test_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_RANGEFAIL,
         threadidx_arg < test_arg->ramtestt_params.ramtestp_threadcount);

   RAM_FAIL_TRAP(ramtest_randuint32(&n,
         test_arg->ramtestt_params.ramtestp_minsize,
         test_arg->ramtestt_params.ramtestp_maxsize + 1));
   desc.ramtestad_sz = n;
   /* i want a certain percentage of allocations to be performed by
    * an alternate allocator. */
   RAM_FAIL_TRAP(ramtest_randint32(&roll, 0, 100));
   /* splint reports a problem in the next line regarding the difference
    * in type between the two integers being compared. i don't understand
    * why it's necessary to consider int32_t and int separate types and i
    * can't find any information about 16-bit programming platforms, so
    * i'm going to suppress it. */
   if (/*@t1@*/roll < test_arg->ramtestt_params.ramtestp_mallocchance)
   {
      desc.ramtestad_pool = NULL;
      desc.ramtestad_ptr = malloc(desc.ramtestad_sz);
   }
   else
   {
      RAM_FAIL_TRAP(test_arg->ramtestt_params.ramtestp_acquire(&desc,
            desc.ramtestad_sz, test_arg->ramtestt_params.ramtestp_extra,
            threadidx_arg));
   }

   if (!test_arg->ramtestt_params.ramtestp_nofill)
      RAM_FAIL_TRAP(ramtest_fill(desc.ramtestad_ptr, desc.ramtestad_sz));

   *newptr_arg = desc;
   return RAM_REPLY_OK;
}

ram_reply_t ramvec_chkinv(ramlist_list_t *list_arg, void *context_arg)
{
   const ramvec_node_t *node = NULL;
   const ramvec_chkcontext_t *c = (ramvec_chkcontext_t *)context_arg;

   RAM_FAIL_NOTNULL(list_arg);
   assert(context_arg != NULL);

   RAM_FAIL_TRAP(ramlist_chklist(list_arg));
   node = RAM_CAST_STRUCTBASE(ramvec_node_t, ramvecn_inv, list_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_CORRUPT, c->ramveccc_pool == node->ramvecn_vpool);
   /* if additional checking was specified, pass control to that function
    * with its associated context. */
   if (c->ramveccc_chknode)
      RAM_FAIL_TRAP(c->ramveccc_chknode(node));

   return RAM_REPLY_AGAIN;
}

ram_reply_t rammux_query(rammux_pool_t **mpool_arg, size_t *size_arg,
      void *ptr_arg)
{
   ramalgn_pool_t *apool = NULL;
   ram_reply_t e = RAM_REPLY_INSANE;
   const ramalgn_tag_t *tag = NULL;
   ramsig_signature_t sig = {0};

   RAM_FAIL_NOTNULL(mpool_arg);
   *mpool_arg = NULL;
   RAM_FAIL_NOTNULL(size_arg);
   *size_arg = 0;
   RAM_FAIL_NOTNULL(ptr_arg);

   e = ramalgn_query(&apool, ptr_arg);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
      /* i shouldn't ever get here. */
      return RAM_REPLY_INSANE;
   case RAM_REPLY_NOTFOUND:
      return e;
   case RAM_REPLY_OK:
      break;
   }

   RAM_FAIL_TRAP(ramalgn_gettag(&tag, apool));
   /* i use the signature in the first half of the tag to increase the
    * possibility that an invalid address in 'ptr_arg' won't crash the
    * process when someone attempts to dereference the pointer that i
    * expect to contain the mux pool. */
   sig.ramsigs_n = tag->ramalgnt_values[0];
   /* i consider a signature mismatch an expected failure here, since this
    * function is used to determine whether a pointer belongs to another
    * allocator. */
   if (0 != RAMSIG_CMP(sig, rammux_thesignature))
      return RAM_REPLY_NOTFOUND;

   RAM_FAIL_TRAP(ramalgn_getgranularity(size_arg, apool));
   *mpool_arg = (rammux_pool_t *)tag->ramalgnt_values[1];
   return RAM_REPLY_OK;
}

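/* a minimal sketch (hypothetical helper, not part of the library): use
 * rammux_query() to decide whether a pointer belongs to a given mux pool.
 * RAM_REPLY_NOTFOUND is treated as "owned by someone else" (e.g. malloc()),
 * mirroring how ramtest_dealloc() interprets its query callback. */
ram_reply_t example_ownedbymux(int *owned_arg, const rammux_pool_t *mpool_arg,
      void *ptr_arg)
{
   rammux_pool_t *mpool = NULL;
   size_t sz = 0;
   ram_reply_t e = RAM_REPLY_INSANE;

   RAM_FAIL_NOTNULL(owned_arg);
   *owned_arg = 0;
   RAM_FAIL_NOTNULL(mpool_arg);

   e = rammux_query(&mpool, &sz, ptr_arg);
   switch (e)
   {
   default:
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   case RAM_REPLY_NOTFOUND:
      /* the pointer doesn't carry the mux signature; it belongs to another
       * allocator. */
      return RAM_REPLY_OK;
   case RAM_REPLY_OK:
      *owned_arg = (mpool == mpool_arg);
      return RAM_REPLY_OK;
   }
}
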
ram_reply_t ramtra_foreach(ramtra_trash_t *trash_arg, ramtra_foreach_t func_arg,
      void *context_arg)
{
   ram_reply_t e = RAM_REPLY_INSANE;
   ramtra_foreachadaptor_t fea = {0};

   RAM_FAIL_NOTNULL(trash_arg);
   RAM_FAIL_NOTNULL(func_arg);

   fea.ramtrafea_function = func_arg;
   fea.ramtrafea_context = context_arg;
   RAM_FAIL_TRAP(rammtx_wait(&trash_arg->ramtrat_mutex));
   /* i don't include the first element in 'ramtrat_items' because it is a
    * sentinel and doesn't actually hold a trashed pointer. */
   e = ramslst_foreach(RAMSLST_NEXT(&trash_arg->ramtrat_items),
         &ramtra_foreachadaptor, &fea);
   /* if i fail to quit the mutex, the process can't continue meaningfully. */
   RAM_FAIL_PANIC(rammtx_quit(&trash_arg->ramtrat_mutex));
   RAM_FAIL_TRAP(e);

   return RAM_REPLY_OK;
}

ram_reply_t rammux_chkpool(const rammux_pool_t *mpool_arg)
{
   size_t i = 0;

   for (i = 0; i < RAMMUX_MAXPOOLCOUNT; ++i)
   {
      if (mpool_arg->rammuxp_initflags[i])
         RAM_FAIL_TRAP(ramalgn_chkpool(&mpool_arg->rammuxp_apools[i]));
   }

   return RAM_REPLY_OK;
}

ram_reply_t ramvec_chkpool(const ramvec_pool_t *pool_arg,
      ramvec_chknode_t chknode_arg)
{
   ramlist_list_t *first = NULL;
   ramvec_chkcontext_t c = {0};

   RAM_FAIL_NOTNULL(pool_arg);

   c.ramveccc_pool = pool_arg;
   c.ramveccc_chknode = chknode_arg;
   /* the sentinel needs to be checked but cannot be included in the foreach
    * loop because it references no data. it's safe to drop const qualifiers
    * because i know that ramlist_foreach() does not modify values passed
    * into it. */
   RAM_FAIL_TRAP(ramlist_chklist(&pool_arg->ramvecvp_inv));
   RAM_FAIL_TRAP(ramlist_next(&first,
         (ramlist_list_t *)&pool_arg->ramvecvp_inv));
   RAM_FAIL_TRAP(ramlist_foreach(first,
         (ramlist_list_t *)&pool_arg->ramvecvp_inv, &ramvec_chkinv, &c));
   RAM_FAIL_TRAP(ramlist_chklist(&pool_arg->ramvecvp_avail));
   RAM_FAIL_TRAP(ramlist_next(&first,
         (ramlist_list_t *)&pool_arg->ramvecvp_avail));
   RAM_FAIL_TRAP(ramlist_foreach(first,
         (ramlist_list_t *)&pool_arg->ramvecvp_avail, &ramvec_chkavail, &c));

   return RAM_REPLY_OK;
}

ram_reply_t ramtest_test(const ramtest_params_t *params_arg)
{
   ram_reply_t e = RAM_REPLY_INSANE;
   ramtest_test_t test = {0};
   size_t unused = 0;

   RAM_FAIL_NOTNULL(params_arg);

   RAM_FAIL_TRAP(ramtest_describe(stderr, params_arg));
   /* if a dry run has been specified, i'll quit now. */
   if (params_arg->ramtestp_dryrun)
      return RAM_REPLY_OK;

   RAM_FAIL_TRAP(ramtest_inittest(&test, params_arg));
   e = ramtest_test2(&test);
   RAM_FAIL_TRAP(ram_fail_accumulate(&e, ramtest_fintest(&test)));
   if (RAM_REPLY_OK == e)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] the test succeeded.\n"));
      return RAM_REPLY_OK;
   }
   else
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "[0] the test failed (reply code %d).\n", e));
      RAM_FAIL_TRAP(e);
      return RAM_REPLY_INSANE;
   }
}

ram_reply_t ramvec_getnode(ramvec_node_t **node_arg, ramvec_pool_t *pool_arg)
{
   int hastail = 0;

   RAM_FAIL_NOTNULL(node_arg);
   *node_arg = NULL;
   RAM_FAIL_NOTNULL(pool_arg);

   RAM_FAIL_TRAP(ramlist_hastail(&hastail, &pool_arg->ramvecvp_avail));
   if (hastail)
   {
      ramlist_list_t *l = NULL;

      /* there's something on the availability stack; i need to retrieve it. */
      RAM_FAIL_TRAP(ramlist_next(&l, &pool_arg->ramvecvp_avail));
      *node_arg = RAM_CAST_STRUCTBASE(ramvec_node_t, ramvecn_avail, l);
      return RAM_REPLY_OK;
   }
   else
   {
      ramvec_node_t *node = NULL;

      /* there's nothing on the availability stack; i need to make a new
       * node. */
      RAM_FAIL_TRAP(pool_arg->ramvecvp_mknode(&node, pool_arg));
      RAM_FAIL_TRAP(ramvec_initnode(node, pool_arg));
      RAM_FAIL_TRAP(ramlist_splice(&pool_arg->ramvecvp_avail,
            &node->ramvecn_avail));
      RAM_FAIL_TRAP(ramlist_splice(&pool_arg->ramvecvp_inv,
            &node->ramvecn_inv));
      *node_arg = node;
      return RAM_REPLY_OK;
   }
}

ram_reply_t ramvec_release(ramvec_node_t *node_arg, int wasfull_arg,
      int isempty_arg)
{
   ramvec_pool_t *pool = NULL;

   RAM_FAIL_NOTNULL(node_arg);
   pool = node_arg->ramvecn_vpool;
   assert(pool != NULL);

   /* if the node is now empty, i can discard it by removing it from both
    * the inventory and availability lists. */
   if (isempty_arg)
   {
      ramlist_list_t *unused = NULL;

      RAM_FAIL_TRAP(ramlist_pop(&unused, &node_arg->ramvecn_inv));
      RAM_FAIL_TRAP(ramlist_mknil(&node_arg->ramvecn_inv));
      if (!RAMLIST_ISNIL(&node_arg->ramvecn_avail))
      {
         RAM_FAIL_TRAP(ramlist_pop(&unused, &node_arg->ramvecn_avail));
         RAM_FAIL_TRAP(ramlist_mknil(&node_arg->ramvecn_avail));
      }
   }
   /* otherwise, if the node was full before releasing the memory object,
    * then i need to push it onto the availability stack. */
   else if (wasfull_arg)
   {
      RAM_FAIL_TRAP(ramlist_mklist(&node_arg->ramvecn_avail));
      RAM_FAIL_TRAP(ramlist_splice(&pool->ramvecvp_avail,
            &node_arg->ramvecn_avail));
   }

   return RAM_REPLY_OK;
}

ram_reply_t ramvec_acquire(ramvec_node_t *node_arg, int isfull_arg)
{
   ramvec_pool_t *pool = NULL;

   RAM_FAIL_NOTNULL(node_arg);
   pool = node_arg->ramvecn_vpool;
   assert(pool != NULL);

   /* now, if the node is full, it becomes unavailable. i remove it from the
    * availability stack. i can ignore the return value of 'ramlist_pop()'
    * because access to it is already preserved through
    * 'pool->ramvecvp_avail'. */
   if (isfull_arg)
   {
      ramlist_list_t *unused = NULL;

      RAM_FAIL_TRAP(ramlist_pop(&unused, &node_arg->ramvecn_avail));
      RAM_FAIL_TRAP(ramlist_mknil(&node_arg->ramvecn_avail));
   }

   return RAM_REPLY_OK;
}

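/* a minimal sketch of the caller-side contract (hypothetical types and
 * helpers, not part of the library): a client pool embeds ramvec_node_t in
 * its own node type, asks ramvec_getnode() for a node with spare capacity,
 * and reports full/empty transitions back through ramvec_acquire() and
 * ramvec_release(). the slot bookkeeping here (example_node_t and the
 * 'used'/'capacity' counters) is invented for illustration only. */
typedef struct example_node
{
   ramvec_node_t examplen_vnode;   /* the embedded ramvec base node. */
   size_t examplen_used;
   size_t examplen_capacity;
} example_node_t;

ram_reply_t example_acquireobject(ramvec_pool_t *pool_arg)
{
   ramvec_node_t *vnode = NULL;
   example_node_t *node = NULL;

   RAM_FAIL_NOTNULL(pool_arg);

   RAM_FAIL_TRAP(ramvec_getnode(&vnode, pool_arg));
   node = RAM_CAST_STRUCTBASE(example_node_t, examplen_vnode, vnode);
   /* ... hand out one slot from 'node' to the caller here ... */
   ++node->examplen_used;
   /* tell ramvec whether the node just became full so it can drop it from
    * the availability stack. */
   RAM_FAIL_TRAP(ramvec_acquire(vnode,
         node->examplen_used == node->examplen_capacity));

   return RAM_REPLY_OK;
}

ram_reply_t example_releaseobject(example_node_t *node_arg)
{
   int wasfull = 0;

   RAM_FAIL_NOTNULL(node_arg);

   wasfull = (node_arg->examplen_used == node_arg->examplen_capacity);
   /* ... take the slot back from the caller here ... */
   --node_arg->examplen_used;
   /* report the transition: 'wasfull' lets ramvec put the node back onto the
    * availability stack; an empty node gets dropped from both lists. */
   RAM_FAIL_TRAP(ramvec_release(&node_arg->examplen_vnode, wasfull,
         0 == node_arg->examplen_used));

   return RAM_REPLY_OK;
}
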
ram_reply_t ramwin_mkbarrier(ramwin_barrier_t *barrier_arg, size_t capacity_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);
   memset(barrier_arg, 0, sizeof(*barrier_arg));

   RAM_FAIL_TRAP(ram_cast_sizetolong(&barrier_arg->ramwinb_capacity,
         capacity_arg));
   barrier_arg->ramwinb_vacancy = barrier_arg->ramwinb_capacity;
   barrier_arg->ramwinb_event = CreateEvent(NULL, FALSE, FALSE, NULL);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, NULL != barrier_arg->ramwinb_event);

   return RAM_REPLY_OK;
}

ram_reply_t ramlin_rmbarrier(ramlin_barrier_t *barrier_arg)
{
   RAM_FAIL_NOTNULL(barrier_arg);

   /* i don't allow destruction of the barrier while it's in use. */
   RAM_FAIL_EXPECT(RAM_REPLY_UNSUPPORTED,
         barrier_arg->ramlinb_vacancy == barrier_arg->ramlinb_capacity);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         0 == pthread_cond_destroy(&barrier_arg->ramlinb_cond));
   RAM_FAIL_TRAP(rammtx_rmmutex(&barrier_arg->ramlinb_mutex));

   return RAM_REPLY_OK;
}

ram_reply_t ramwin_release(char *pages_arg)
{
   int ispage = 0;

   RAM_FAIL_NOTNULL(pages_arg);

   RAM_FAIL_TRAP(rammem_ispage(&ispage, pages_arg));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, ispage);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL, VirtualFree(pages_arg, 0, MEM_RELEASE));

   return RAM_REPLY_OK;
}

ram_reply_t runtest2(const ramtest_params_t *params_arg, extra_t *extra_arg)
{
   ramtest_params_t testparams = {0};
   size_t unused = 0;

   testparams = *params_arg;
   /* i am responsible for policing the minimum and maximum allocation
    * size here. */
   if (testparams.ramtestp_minsize < sizeof(void *)
         || testparams.ramtestp_maxsize < sizeof(void *))
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "you cannot specify a size smaller than %zu bytes.\n",
            sizeof(void *)));
      return RAM_REPLY_INPUTFAIL;
   }
   /* TODO: shouldn't this test be moved into the framework? */
   if (testparams.ramtestp_minsize > testparams.ramtestp_maxsize)
   {
      RAM_FAIL_TRAP(ramtest_fprintf(&unused, stderr,
            "please specify a minimum size (%zu bytes) that is smaller than "
            "or equal to the maximum (%zu bytes).\n",
            testparams.ramtestp_minsize, testparams.ramtestp_maxsize));
      return RAM_REPLY_INPUTFAIL;
   }

   /* TODO: how do i determine the maximum allocation size ahead of time? */
   testparams.ramtestp_extra = extra_arg;
   testparams.ramtestp_acquire = &acquire;
   testparams.ramtestp_release = &release;
   testparams.ramtestp_query = &query;
   testparams.ramtestp_flush = &flush;
   testparams.ramtestp_check = &check;
   RAM_FAIL_TRAP(ramtest_test(&testparams));

   return RAM_REPLY_OK;
}

ram_reply_t ramwin_decommit(char *page_arg)
{
   int ispage = 0;

   RAM_FAIL_NOTNULL(page_arg);
   RAM_FAIL_EXPECT(RAM_REPLY_INCONSISTENT, ramwin_sysinfo.dwPageSize != 0);

   RAM_FAIL_TRAP(rammem_ispage(&ispage, page_arg));
   RAM_FAIL_EXPECT(RAM_REPLY_DISALLOWED, ispage);
   RAM_FAIL_EXPECT(RAM_REPLY_APIFAIL,
         VirtualFree(page_arg, ramwin_sysinfo.dwPageSize, MEM_DECOMMIT));

   return RAM_REPLY_OK;
}

ram_reply_t ramtest_maxthreadcount(size_t *count_arg)
{
   size_t cpucount = 0;

   RAM_FAIL_NOTNULL(count_arg);
   *count_arg = 0;

   RAM_FAIL_TRAP(ramsys_cpucount(&cpucount));
   /* if the thread count is greater than 5 times the number of CPUs,
    * i'm going to disallow it. */
   *count_arg = cpucount * 5;

   return RAM_REPLY_OK;
}
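
/* a minimal sketch (hypothetical helper, not part of the library): clamp a
 * user-supplied thread count to the ceiling computed by
 * ramtest_maxthreadcount() above. */
ram_reply_t example_clampthreadcount(size_t *count_arg)
{
   size_t maxcount = 0;

   RAM_FAIL_NOTNULL(count_arg);

   RAM_FAIL_TRAP(ramtest_maxthreadcount(&maxcount));
   if (*count_arg > maxcount)
      *count_arg = maxcount;

   return RAM_REPLY_OK;
}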