/* check that multiple allocations create different maps */
void test_vm_alloc_multiple_arguments(void)
{
	vm_map_t map1, map2;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	w_ptr_t start1, start2;
	w_handle_t ram_handle1, ram_handle2;
	w_handle_t swap_handle1, swap_handle2;

	memset(&map1, 0, sizeof(vm_map_t));
	memset(&map2, 0, sizeof(vm_map_t));

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map1);
	vm_alloc(num_pages, num_frames, &map2);

	start1 = map1.start;
	start2 = map2.start;
	ram_handle1 = map1.ram_handle;
	ram_handle2 = map2.ram_handle;
	swap_handle1 = map1.swap_handle;
	swap_handle2 = map2.swap_handle;

	vm_free(map1.start);
	vm_free(map2.start);
	vmsim_cleanup();

	basic_test(start1 != start2 &&
			ram_handle1 != ram_handle2 &&
			swap_handle1 != swap_handle2);
}
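/*
 * basic_test() is used by every test in this listing but is not defined
 * here. A minimal sketch, assuming it only reports whether the given
 * condition holds, tagged with the calling test's name; the real helper
 * may record results differently.
 */
#include <stdio.h>

#define basic_test(cond) \
	printf("%s: %s\n", __func__, (cond) ? "passed" : "failed")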
/* check that a write is carried through to file */
void test_mapping_write_is_carried_through_to_file(void)
{
	vm_map_t map;
	w_size_t pos;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	char tmp;
	w_boolean_t byte_found = FALSE;
	w_size_t page_offset;
	w_size_t i;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);
	zero_file(map.ram_handle, num_frames * p_sz);

	pos = get_random_byte_mapping_position(num_pages);

	((char *) map.start)[pos] = MAGIC;

	w_sync_mapping(map.start, num_pages);

	page_offset = pos % p_sz;
	for (i = 0; i < num_frames; i++) {
		tmp = read_byte_from_file(map.ram_handle,
					  i * p_sz + page_offset);
		if (tmp == MAGIC)
			byte_found = TRUE;
	}

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(byte_found == TRUE);
}
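/*
 * read_byte_from_file() is assumed above but not shown. A hedged sketch
 * built from the w_set_file_pointer()/w_read_file() wrappers that appear
 * later in this listing: position the file handle at `offset` and read a
 * single byte. The real helper may differ.
 */
static char read_byte_from_file(w_handle_t handle, w_size_t offset)
{
	char value = 0;

	/* seek to the requested offset, then read exactly one byte */
	w_set_file_pointer(handle, offset);
	w_read_file(handle, &value, 1);

	return value;
}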
/* do read; second read in same page should not fault */
void test_mapping_read_after_read_no_faults(void)
{
	vm_map_t map;
	char tmp;
	w_size_t pos;
	w_size_t new_offset;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	pos = get_random_byte_mapping_position(num_pages);

	tmp = ((char *) map.start)[pos];
	vmsim_test_reset_faults();
	new_offset = pos / p_sz * p_sz + get_random_byte_mapping_position(1);
	tmp = ((char *) map.start)[new_offset];

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == 0);
}
/* do page fault when reading -- demand paging */
void test_mapping_one_fault_per_page_read(void)
{
	vm_map_t map;
	char tmp;
	w_size_t pos;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	w_size_t i;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	vmsim_test_reset_faults();
	for (i = 0; i < num_pages; i++) {
		pos = get_random_byte_mapping_position(1);
		dlog(LOG_DEBUG, "read from page %u\n", i);
		tmp = ((char *) map.start)[i * p_sz + pos];
	}

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == num_pages);
}
/* check that a page is cleared the first time it is allocated in RAM */
void test_page_is_cleared_at_first_allocation(void)
{
	vm_map_t map;
	char tmp, value;
	const w_size_t offset = 10;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(2, 1, &map);

	/* fault first page (write) */
	*((char *) map.start + 0 * p_sz + offset) = MAGIC;

	/* fault second page (read-only) */
	tmp = *((char *) map.start + 1 * p_sz + offset);

	w_sync_mapping(map.start, 2);

	w_set_file_pointer(map.ram_handle, 0 * p_sz + offset);
	w_read_file(map.ram_handle, &value, 1);

	dlog(LOG_DEBUG, "value = %02x, tmp = %02x\n", value, tmp);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(value == 0 && tmp == 0);
}
/* check that page has read protection after swap in */
void test_page_has_read_prot_after_swap_in(void)
{
	vm_map_t map;
	const w_size_t offset1 = 10;
	const w_size_t offset2 = 20;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(2, 1, &map);

	/* fault first page (write) */
	*((char *) map.start + 0 * p_sz + offset1) = MAGIC;
	w_sync_mapping(map.start, 2);

	/* fault second page (write) -- first page goes to swap */
	*((char *) map.start + 1 * p_sz + offset2) = MAGIC;
	w_sync_mapping(map.start, 2);

	vmsim_test_reset_faults();

	/* fault first page (write) */
	*((char *) map.start + 0 * p_sz + offset1) = MAGIC;
	w_sync_mapping(map.start, 2);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == 2);
}
/* Example #7 */
/**
Allocate a string on the hosted heap
*/
string_t* string_alloc(uint32_t len)
{
    string_t* str = (string_t*)vm_alloc(sizeof(string_t) + len, TAG_STRING);

    str->len = len;

    return str;
}
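/*
 * Hedged usage sketch for string_alloc(): build a string object from raw
 * bytes. It assumes the character payload sits immediately after the
 * string_t header (consistent with the sizeof(string_t) + len allocation
 * above); the real string_t may expose a named field for this instead.
 */
#include <string.h>

string_t* string_from_bytes(const char* bytes, uint32_t len)
{
    string_t* str = string_alloc(len);

    // copy the payload into the space reserved right after the header
    memcpy((char*)(str + 1), bytes, len);

    return str;
}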
/* test swap out */
void test_swap_out(void)
{
	vm_map_t map;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	char tmp;
	w_size_t i;
	w_size_t j;
	w_size_t *offset_array;
	w_size_t match_count = 0;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	/* zero swap for further checking */
	zero_file(map.swap_handle, num_pages * p_sz);

	offset_array = malloc((num_frames + 1) * sizeof(*offset_array));

	/*
	 * at most num_frames pages will be backed by RAM;
	 * the (num_frames + 1)-th access will result in a swap out
	 */
	for (i = 0; i < num_frames + 1; i++) {
		offset_array[i] = get_random_byte_mapping_position(1);
		*((char *) map.start + i * p_sz + offset_array[i]) = MAGIC;
	}

	/* sync ram file to ensure proper swap out */
	w_sync_mapping(map.start, num_pages);

	/* go through swap to look for swapped pages */
	for (i = 0; i < num_pages; i++) {
		for (j = 0; j < num_frames + 1; j++) {
			w_size_t offset = i * p_sz + offset_array[j];

			tmp = read_byte_from_file(map.swap_handle, offset);
			if (tmp == MAGIC) {
				dlog(LOG_DEBUG, "match found, frame index:"
					"%u, offset: %u\n", i, offset);
				match_count++;
				break;
			}
		}
	}

	dlog(LOG_DEBUG, "match count: %u\n", match_count);

	vm_free(map.start);
	vmsim_cleanup();
	free(offset_array);

	basic_test(match_count > 0);
}
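/*
 * zero_file() and fill_file() are used by these tests but not defined in
 * this listing. A hedged sketch, assuming a w_write_file() counterpart to
 * the w_read_file() wrapper used elsewhere here: overwrite the first
 * `size` bytes of the file with a constant value. The real helpers may
 * write in larger chunks.
 */
static void fill_file(w_handle_t handle, w_size_t size, char value)
{
	w_size_t i;

	w_set_file_pointer(handle, 0);
	for (i = 0; i < size; i++)
		w_write_file(handle, &value, 1);
}

static void zero_file(w_handle_t handle, w_size_t size)
{
	fill_file(handle, size, 0);
}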
/* Example #9 */
bool vm_load(VectorOfMatrices* vm, const char* filename)
{
    assert(vm != NULL);

    FILE* fp = fopen(filename, "r");
    if (fp == NULL) {
        perror(filename);
        goto return_false;
    }

    int fmt;
    unsigned long count;
    unsigned long rows;
    unsigned long cols;

    int suc = fscanf(fp, "%d", &fmt);
    CHECK_IO(suc, 1, return_false);
    if (fmt != FMT_VECTOR_OF_MATRICES) {
        goto return_false;
    }

    suc = fscanf(fp, "%lu %lu %lu", &count, &rows, &cols);
    CHECK_IO(suc, 3, return_false);
    if (rows <= 0 || cols <= 0 || count <= 0) {
        goto return_false;
    }
    vm->rows = rows;
    vm->cols = cols;
    vm->count = count;
    if (!vm_alloc(vm)) {
        goto return_false;
    }

    int* pvm = vm->items;
    size_t iter_count = rows * cols * count;
    for (size_t i = 0; i < iter_count; ++i) {
        suc = fscanf(fp, "%d", pvm++);
        CHECK_IO(suc, 1, return_false);
    }

    if (fclose(fp) != 0) {
        perror(filename);
    }

    return true;

return_false:
    if (fp != NULL && fclose(fp) != 0) {
        perror(filename);
    }

    vm_free(vm);
    return false;
}
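/*
 * CHECK_IO is used by vm_load() above but not defined in this snippet.
 * A minimal sketch, assuming it compares the fscanf() return value with
 * the expected number of conversions and jumps to the given cleanup
 * label on mismatch; the real macro may also log the failure.
 */
#define CHECK_IO(actual, expected, label) \
    do {                                  \
        if ((actual) != (expected))       \
            goto label;                   \
    } while (0)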
/* Example #10 */
clos_t* clos_alloc(ast_fun_t* fun)
{
    clos_t* clos = (clos_t*)vm_alloc(
        sizeof(clos_t) + sizeof(cell_t*) * fun->free_vars->len,
        SHAPE_STRING
    );

    clos->fun = fun;

    return clos;
}
/* Example #11 */
static void vm_save_frame(vm_context_t *ctx, uint8_t *pc)
{
	vm_callframe_t *frame;

	frame = vm_alloc(sizeof(*frame));
	frame->return_pc = pc;
	frame->locals = ctx->locals;
	frame->dstack_top = vm_stack_top(ctx->dstack);

	vm_stack_push(ctx->cstack, (vm_operand_t)frame);

	ctx->locals = NULL;
}
/* Example #12 */
array_t* array_alloc(uint32_t cap)
{
    // Note: the heap is zeroed out on allocation
    array_t* arr = (array_t*)vm_alloc(
        sizeof(array_t) + cap * sizeof(value_t),
        TAG_ARRAY
    );

    arr->cap = cap;
    arr->len = 0;

    return arr;
}
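/*
 * Hedged usage sketch for array_alloc(): append a value to an array,
 * relying on the zeroed heap noted above so unused slots start out as
 * zero. The `elems` field name is an assumption about the array_t layout.
 */
void array_append(array_t* arr, value_t val)
{
    // refuse to write past the allocated capacity
    if (arr->len < arr->cap)
        arr->elems[arr->len++] = val;
}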
/* test swap in */
void test_swap_in(void)
{
	vm_map_t map;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	char tmp;
	w_size_t i;
	w_size_t offset;
	w_boolean_t match_found = FALSE;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	/* fault all pages */
	for (i = 0; i < num_pages; i++) {
		offset = get_random_byte_mapping_position(1);
		*((char *) map.start + i * p_sz + offset) = MAGIC;
	}

	dlog(LOG_INFO, "all pages faulted\n");

	/* fill swap with other data */
	fill_file(map.swap_handle, num_pages * p_sz, ~MAGIC);

	/*
	 * at most num_frames pages will be backed by RAM;
	 * at least the (num_frames + 1)-th page is in swap;
	 * it will be swapped in
	 */
	for (i = 0; i < num_frames + 1; i++) {
		offset = get_random_byte_mapping_position(1);
		*((char *) map.start + i * p_sz + offset) = MAGIC;
	}

	/* some pages will be swapped in */
	for (i = 0; i < num_pages; i++) {
		offset = get_random_byte_mapping_position(1);
		tmp = *((char *) map.start + i * p_sz + offset);
		dlog(LOG_DEBUG, "tmp = %02x, not-magic = %02x\n", tmp, ~MAGIC);
		if (tmp == ~MAGIC)
			match_found = TRUE;
	}

	dlog(LOG_DEBUG, "match found: %d\n", match_found);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(match_found == TRUE);
}
static w_boolean_t test_vm_alloc_bad_arguments(void)
{
	vm_map_t map;
	w_boolean_t rc;
	const w_size_t num_frames = 4;
	const w_size_t num_pages = 2;

	vmsim_init();
	rc = vm_alloc(num_pages, num_frames, &map);

	vmsim_cleanup();

	return rc;
}
void test_mapping_multiple_writes_do_not_overwrite_ram(void)
{
	vm_map_t map;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	char tmp;
	w_size_t i;
	w_size_t j;
	w_size_t *offset_array;
	w_size_t match_count = 0;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);
	zero_file(map.ram_handle, num_frames * p_sz);

	offset_array = malloc(num_pages * sizeof(*offset_array));

	for (i = 0; i < num_pages; i++) {
		offset_array[i] = get_random_byte_mapping_position(1);
		*((char *) map.start + i * p_sz + offset_array[i]) = MAGIC;
	}

	w_sync_mapping(map.start, num_pages);

	for (i = 0; i < num_frames; i++) {
		for (j = 0; j < num_pages; j++) {
			w_size_t offset = i * p_sz + offset_array[j];

			tmp = read_byte_from_file(map.ram_handle, offset);
			if (tmp == MAGIC) {
				dlog(LOG_DEBUG, "match found, frame index:"
					"%u, offset: %u\n", i, offset);
				match_count++;
				break;
			}
		}
	}

	dlog(LOG_DEBUG, "match count: %u\n", match_count);

	vm_free(map.start);
	vmsim_cleanup();
	free(offset_array);

	basic_test(match_count == num_frames);
}
/* check swap file size */
void test_vm_alloc_swap_size(void)
{
	vm_map_t map;
	w_size_t size;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	vm_alloc(num_pages, num_frames, &map);

	size = w_get_file_size_by_handle(map.swap_handle);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(size == num_pages * p_sz);
}
/* test multiple allocations */
void test_vm_alloc_multiple_get_faults(void)
{
	const w_size_t num_mappings = 6;
	vm_map_t *maps;
	char tmp;
	w_size_t pos;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	w_size_t expected_faults = 0;
	w_size_t i, mcount;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vmsim_test_reset_faults();

	maps = malloc(num_mappings * sizeof(*maps));
	for (mcount = 0; mcount < num_mappings; mcount++)
		vm_alloc(num_pages, num_frames, maps + mcount);

	for (mcount = 0; mcount < num_mappings; mcount++) {
		/* go through even pages */
		for (i = 0; i < num_pages; i += 2) {
			pos = get_random_byte_mapping_position(1);
			/* do page fault when reading -- demand paging */
			tmp = ((char *) maps[mcount].start)[i * p_sz + pos];
			/* when reading, one page fault occurs */
			expected_faults++;
		}

		/* go through odd pages */
		for (i = 1; i < num_pages; i += 2) {
			pos = get_random_byte_mapping_position(1);
			/* do page fault when writing -- demand paging */
			((char *) maps[mcount].start)[i * p_sz + pos] = MAGIC;
			/* when writing, two page faults occur */
			expected_faults += 2;
		}
	}

	for (mcount = 0; mcount < num_mappings; mcount++)
		vm_free(maps[mcount].start);
	free(maps);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == expected_faults);
}
/* check that swap file is valid after allocation */
void test_vm_alloc_swap_handle(void)
{
	vm_map_t map;
	w_boolean_t rc;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	vm_alloc(num_pages, num_frames, &map);

	rc = w_handle_is_valid(map.swap_handle);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(rc == TRUE);
}
/* check that initial read-only page is swapped out */
void test_initial_readonly_page_is_swapped_out(void)
{
	vm_map_t map;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	char tmp;
	w_size_t i;
	w_size_t offset;
	w_boolean_t match_found = FALSE;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	/* fill swap with non-MAGIC data for later checking */
	fill_file(map.swap_handle, num_pages * p_sz, ~MAGIC);

	/*
	 * at most num_frames pages will be backed by RAM;
	 * the (num_frames + 1)-th access will result in a swap out
	 */
	for (i = 0; i < num_frames + 1; i++) {
		offset = get_random_byte_mapping_position(1);
		tmp = *((char *) map.start + i * p_sz + offset);
		/* sync ram file to ensure proper swap out */
		w_sync_mapping(map.start, num_pages);
	}

	/* go through swap to look for swapped pages */
	for (i = 0; i < num_pages; i++) {
		tmp = read_byte_from_file(map.swap_handle, i * p_sz + offset);
		dlog(LOG_DEBUG, "tmp = %02x\n", tmp);
		if (tmp != ~MAGIC) {
			dlog(LOG_DEBUG, "match found, frame index:"
				"%u, offset: %u\n", i, offset);
			match_found = TRUE;
		}
	}

	dlog(LOG_DEBUG, "match found: %d\n", match_found);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(match_found == TRUE);
}
/* Example #20 */
vm_context_t *vm_context_create(vm_module_t *module)
{
	vm_context_t *ctx;

	ctx = (vm_context_t *)vm_alloc(sizeof(*ctx));
	if (!ctx) {
		vm_panic("vm_context_create: failed to allocate context.");
	}

	ctx->dstack = vm_stack_create(65536);
	ctx->cstack = vm_stack_create(8192);	
	ctx->pc = module->entry;
	ctx->locals = NULL;
	ctx->module = module;

	return ctx;
}
/* Example #21 */
void *__vmap(const unsigned long *mfn, unsigned int granularity,
             unsigned int nr, unsigned int align, unsigned int flags)
{
    void *va = vm_alloc(nr * granularity, align);
    unsigned long cur = (unsigned long)va;

    for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
    {
        if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
        {
            vunmap(va);
            va = NULL;
        }
    }

    return va;
}
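/*
 * Hedged sketch of the usual single-granularity wrapper around __vmap():
 * map `nr` machine frames one page at a time. PAGE_HYPERVISOR is assumed
 * to be the default mapping flag set provided by the surrounding
 * hypervisor headers.
 */
void *vmap(const unsigned long *mfn, unsigned int nr)
{
    return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR);
}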
/* Example #22 */
/* assemble a string on the stack into a proc */
void op_asm(vm_internal_type *vm) {
    object_type *obj = 0;
    object_type *closure =0;
    env_type *env = 0;

    uint8_t *code_ref = 0;
    size_t written = 0;

    gc_register_root(vm->gc, (void **)&obj);
    gc_register_root(vm->gc, (void **)&closure);
    gc_register_root(vm->gc, (void **)&code_ref);
    gc_register_root(vm->gc, (void **)&env);

    obj = vm_pop(vm);

    if (!obj || obj->type != STRING) {
        throw(vm, "Attempt to assemble non-string", 1, obj);

    } else {
        /* assemble the string */
        written = asm_string(vm->gc, obj->value.string.bytes, &code_ref);

        /* clone the current environment into a
           closure */
        clone_env(vm, (env_type **)&env, vm->env, false);

        /* point to the entry point of our
           assembled code_ref */
        env->code_ref = code_ref;
        env->ip = 0;
        env->length = written;

        closure = vm_alloc(vm, CLOSURE);
        /* save the new closure onto our stack */
        closure->value.closure = env;

        vm_push(vm, closure);
    }

    gc_unregister_root(vm->gc, (void **)&env);
    gc_unregister_root(vm->gc, (void **)&code_ref);
    gc_unregister_root(vm->gc, (void **)&closure);
    gc_unregister_root(vm->gc, (void **)&obj);

}
/* test whether clean pages are swapped out (it shouldn't happen) */
void test_clean_page_is_not_swapped_out(void)
{
	vm_map_t map;
	char tmp, value1, value2;
	const w_size_t offset1 = 10;
	const w_size_t offset2 = 20;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(2, 1, &map);

	/* fault first page (write) */
	*((char *) map.start + 0 * p_sz + offset1) = MAGIC;
	w_sync_mapping(map.start, 2);

	/* fault second page (write) -- first page goes to swap */
	*((char *) map.start + 1 * p_sz + offset2) = MAGIC;
	w_sync_mapping(map.start, 2);

	/* fault first page (read-only) -- second page goes to swap */
	tmp = *((char *) map.start + 0 * p_sz + offset1);
	w_sync_mapping(map.start, 2);

	fill_file(map.swap_handle, 2 * p_sz, ~MAGIC);

	/* fault second page (read-only) -- first page should not go to swap */
	tmp = *((char *) map.start + 1 * p_sz + offset2);
	w_sync_mapping(map.start, 2);

	/* fault first page (read-only) -- second page should not go to swap */
	tmp = *((char *) map.start + 0 * p_sz + offset1);
	w_sync_mapping(map.start, 2);

	w_set_file_pointer(map.swap_handle, 0 * p_sz + offset1);
	w_read_file(map.swap_handle, &value1, 1);
	w_set_file_pointer(map.swap_handle, 1 * p_sz + offset2);
	w_read_file(map.swap_handle, &value2, 1);

	dlog(LOG_DEBUG, "value1 = %02x, value2 = %02x\n", value1, value2);

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(value1 == ~MAGIC && value2 == ~MAGIC);
}
/* check that after free, swap file is invalid */
void test_vm_free_swap_handle(void)
{
	vm_map_t map;
	w_boolean_t rc_before_free, rc_after_free;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	vm_alloc(num_pages, num_frames, &map);

	rc_before_free = w_handle_is_valid(map.swap_handle);
	vm_free(map.start);
	rc_after_free = w_handle_is_valid(map.swap_handle);

	vmsim_cleanup();

	basic_test(rc_before_free == TRUE && rc_after_free == FALSE);
}
/* check that RAM file is valid after allocation */
void test_vm_alloc_ram_handle(void)
{
	vm_map_t map;
	w_boolean_t rc;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	dlog(LOG_DEBUG, "vmsim_init called\n");
	vm_alloc(num_pages, num_frames, &map);
	dlog(LOG_DEBUG, "vm_alloc called\n");

	rc = w_handle_is_valid(map.ram_handle);
	dlog(LOG_DEBUG, "w_handle_is_valid called\n");

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(rc == TRUE);
}
/* check that free actually unmaps a zone */
void test_vm_free_start_addr(void)
{
	vm_map_t map;
	w_boolean_t rc_before_free, rc_after_free;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	vm_alloc(num_pages, num_frames, &map);

	rc_before_free = w_protect_mapping(map.start, num_pages,
		PROTECTION_READ);
	vm_free(map.start);
	rc_after_free = w_protect_mapping(map.start, num_pages,
		PROTECTION_READ);

	vmsim_cleanup();

	basic_test(rc_before_free == TRUE && rc_after_free == FALSE);
}
/* check that handler is called at page fault */
void test_mapping_is_set_in_handler(void)
{
	vm_map_t map;
	char tmp;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	vmsim_test_reset_faults();

	/* do read; force exception handler call */
	tmp = ((char *) map.start)[0];

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() != 0);
}
/* do write; should result in two faults */
void test_mapping_write_results_in_two_faults(void)
{
	vm_map_t map;
	w_size_t pos;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	pos = get_random_byte_mapping_position(num_pages);

	vmsim_test_reset_faults();
	((char *) map.start)[pos] = MAGIC;

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == 2);
}
/* check that virtual memory allocation goes smoothly */
void test_vm_alloc_start_addr(void)
{
	vm_map_t map;
	w_boolean_t rc;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;

	vmsim_init();
	dlog(LOG_DEBUG, "vmsim_init called\n");
	vm_alloc(num_pages, num_frames, &map);
	dlog(LOG_DEBUG, "vm_alloc called\n");

	/* if mapping's protection can be changed, the mapping is OK */
	rc = w_protect_mapping(map.start, num_pages, PROTECTION_READ);
	dlog(LOG_DEBUG, "w_protect_mapping called\n");

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(rc == TRUE);
}
void test_mapping_mixed_faults(void)
{
	vm_map_t map;
	char tmp;
	w_size_t pos;
	const w_size_t num_pages = 10;
	const w_size_t num_frames = 4;
	w_size_t expected_faults = 0;
	w_size_t i;

	vmsim_init();
	w_set_exception_handler(vmsim_test_segv_handler);
	vm_alloc(num_pages, num_frames, &map);

	vmsim_test_reset_faults();

	/* go through even pages */
	for (i = 0; i < num_pages; i += 2) {
		pos = get_random_byte_mapping_position(1);
		/* do page fault when reading -- demand paging */
		tmp = ((char *) map.start)[i * p_sz + pos];
		/* when reading, one page fault occurs */
		expected_faults++;
	}

	/* go through odd pages */
	for (i = 1; i < num_pages; i += 2) {
		pos = get_random_byte_mapping_position(1);
		/* do page fault when writing -- demand paging */
		((char *) map.start)[i * p_sz + pos] = MAGIC;
		/* when writing, two page faults occur */
		expected_faults += 2;
	}

	vm_free(map.start);
	vmsim_cleanup();

	basic_test(vmsim_test_get_num_faults() == expected_faults);
}