static int map_sys_call_table(void) { unsigned long *syscalltab = get_sys_call_table(); unsigned long syscalladr = (unsigned long)virt_to_phys(syscalltab); unsigned long offset = syscalladr & (PAGE_SIZE-1); unsigned long start = align_address(syscalladr); unsigned long len = PAGE_SIZE; /* const unsigned int num_pages=1; */ sys_call_adr = ioremap(start, len); if (IS_ERR(sys_call_adr)) { PR_ERROR("unable to ioremap"); return PTR_ERR(sys_call_adr); } else { sys_call_adr_precise = sys_call_adr+offset; PR_DEBUG("got precise %p", sys_call_adr_precise); return 0; } }
/*
 * Initialize the given space: allocate its backing memory according to the
 * configured semispace size, fill it with the blank-heap marker, and set up
 * the start/next_free/limit pointers on an aligned boundary.
 *
 * Returns success(), or a system error condition if allocation fails.
 */
value_t space_init(space_t *space, const runtime_config_t *config) {
  // Reset every field to a known state before touching anything else.
  space_clear(space);
  size_t size = config->semispace_size_bytes;
  // Over-allocate by one word so that we can always align the start of the
  // space, regardless of what alignment malloc happens to give us.
  size_t total_bytes = size + kValueSize;
  memory_block_t block = allocator_default_malloc(total_bytes);
  if (memory_block_is_empty(block))
    return new_system_error_condition(seAllocationFailed);
  // Fill the fresh block with a recognizable pattern so stale reads stand out.
  memset(block.memory, kBlankHeapMarker, total_bytes);
  address_t start = align_address(kValueSize, (address_t) block.memory);
  space->memory = block;
  space->start = start;
  space->next_free = start;
  // When malloc already returns an aligned pointer the extra word we
  // allocated goes unused. That is deliberate: letting the usable space size
  // vary with malloc's alignment behavior would be a recipe for subtle bugs,
  // so the usable size is always exactly 'size'.
  space->limit = start + size;
  return success();
}