/* Check that returned segments match the state recorded in the buffer */
static int check_map_region(struct inode *inode, block_t start, unsigned count,
			    struct block_segment *seg, unsigned seg_max)
{
	int segs;

	segs = map_region(inode, start, count, seg, seg_max, MAP_READ);
	if (segs > 0)
		check_maps(inode, start, seg, segs);

	return segs;
}
char* FileMapInfo::map_region(int i, ReservedSpace rs) {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
  size_t used = si->_used;
  size_t size = align_size_up(used, os::vm_allocation_granularity());

  // Release the granularity-aligned prefix of the reservation so the
  // region can be mapped from the archive file in its place; the
  // remainder of the reservation stays untouched.
  ReservedSpace mapped_rs = rs.first_part(size, true, true);
  ReservedSpace unmapped_rs = rs.last_part(size);
  mapped_rs.release();

  return map_region(i, true);
}
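/*
 * map_region() above relies on align_size_up() rounding the used byte
 * count up to the platform allocation granularity before splitting the
 * reservation. A minimal standalone sketch of that rounding (align_up is
 * a hypothetical helper assuming a power-of-two granularity; it is not
 * the HotSpot function):
 */
#include <assert.h>
#include <stddef.h>

static size_t align_up(size_t n, size_t granularity)
{
	/* Only valid for power-of-two granularities. */
	assert(granularity && (granularity & (granularity - 1)) == 0);
	return (n + granularity - 1) & ~(granularity - 1);
}
/* e.g. align_up(5000, 4096) == 8192, align_up(4096, 4096) == 4096 */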
/*
 * Makes a mapped region bigger, by extending it at the end. Does not change
 * the user_memory address or the first mapped user address, hence there is
 * no need to update the share cache hash or any other meta-data. Assumes
 * the region is locked.
 */
boolean_t
map_region_bigger(
	user_memory_t	region_p,
	vm_size_t	new_size,
	vm_prot_t	prot)
{
	vm_address_t	user_addr;
	user_memory_t	new_region_p;
	kern_return_t	kr;

	debug(0, ++user_memory_num_extends);
	user_addr = PAGENUM_TO_ADDR(region_p->user_page);

	/*
	 * Cancel the increment that will be done by map_region
	 */
	debug(0, --user_memory_num_maps);

	if ((new_region_p = map_region(region_p->task, user_addr,
				       new_size, prot)) != NULL) {
		/*
		 * Deallocate the old shared memory
		 */
		kr = vm_deallocate(mach_task_self(), region_p->svr_addr,
				   region_p->size);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("map_region_bigger: vm_deallocate"));
		}

		/*
		 * Copy the relevant fields of the new descriptor on top of
		 * the old one
		 */
		region_p->svr_addr = new_region_p->svr_addr;
		region_p->size = new_region_p->size;

		/*
		 * Free the new region descriptor. We unlock it for the
		 * benefit of debugging code that keeps track of the locks
		 * held by a thread.
		 */
		ASSERT(new_region_p->ref_count == 0);
		new_region_p->ref_count--;
		kfree(new_region_p);
	} else {
		return FALSE;
	}

	debug_prf(2, ("%s: extended map at %x to len %d for task %p.\n",
		      "map_region_bigger", user_addr, new_size,
		      region_p->task));
	return TRUE;
}
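/*
 * On Linux, the same grow-by-remapping idea is usually expressed with
 * mremap(), which either extends the mapping in place or moves it to a
 * larger hole, instead of mapping anew and deallocating the old region
 * by hand. A minimal standalone sketch, not part of the Mach server
 * above (grow_mapping is a hypothetical name):
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

static void *grow_mapping(void *old_addr, size_t old_size, size_t new_size)
{
	/*
	 * MREMAP_MAYMOVE lets the kernel relocate the region if it cannot
	 * be extended at its current address.
	 */
	void *p = mremap(old_addr, old_size, new_size, MREMAP_MAYMOVE);
	if (p == MAP_FAILED) {
		perror("mremap");
		return NULL;
	}
	return p;
}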
/* Create segments, then save their state to the buffer */
static int d_map_region(struct inode *inode, block_t start, unsigned count,
			struct block_segment *seg, unsigned seg_max,
			enum map_mode mode)
{
	int segs;

	/* This must be called with mode != MAP_READ */
	assert(mode != MAP_READ);

	segs = map_region(inode, start, count, seg, seg_max, mode);
	if (segs > 0)
		add_maps(inode, start, seg, segs);

	return segs;
}
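/*
 * d_map_region() and check_map_region() above are used as a pair: write
 * segments through one, then re-read and verify them through the other.
 * A hedged usage sketch mirroring the test code below; the sb/inode
 * setup and the tux3 test harness are assumed to exist, and
 * example_write_then_verify is a hypothetical name:
 */
static void example_write_then_verify(struct sb *sb, struct inode *inode)
{
	struct block_segment seg;
	int segs;

	/* Set fake backend mark to modify backend objects. */
	tux3_start_backend(sb);

	/* Create one segment at logical block 10 ... */
	segs = d_map_region(inode, 10, 1, &seg, 1, MAP_WRITE);
	test_assert(segs == 1);

	/* ... then re-read it and compare against the recorded state. */
	segs = check_map_region(inode, 10, 1, &seg, 1);
	test_assert(segs == 1);

	tux3_end_backend();
}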
static Address trymmap(size_t len, Address beg, Address end, size_t inc, int fd)
{
	Address addr;
	void *result;

	/*
	 * We have a possibly large region (beg to end) and a hopefully
	 * smaller allocation size (len). We try to map at every page in
	 * the region until we get one that succeeds.
	 */
	for (addr = beg; addr + len <= end; addr += inc) {
		result = map_region((void *) addr, len, fd);
		if (result)
			return (Address) result;
	}
	return (Address) NULL;
}
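/*
 * A standalone sketch of the same probing loop over plain mmap(). With
 * MAP_FIXED_NOREPLACE (Linux 4.17+), a hint that lands on an occupied
 * slot fails with EEXIST instead of silently clobbering an existing
 * mapping, so the loop can keep probing safely. try_mmap_range is a
 * hypothetical name, not the function above:
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *try_mmap_range(uintptr_t beg, uintptr_t end, size_t len,
			    size_t inc)
{
	uintptr_t addr;

	for (addr = beg; addr + len <= end; addr += inc) {
		void *p = mmap((void *) addr, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS |
			       MAP_FIXED_NOREPLACE, -1, 0);
		if (p != MAP_FAILED)
			return p;	/* hint honored: done */
		if (errno != EEXIST)
			return NULL;	/* hard failure, stop probing */
	}
	return NULL;
}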
bool FileMapInfo::map_space(int i, ReservedSpace rs, ContiguousSpace* space) {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];

  if (space != NULL) {
    if (si->_base != (char*)space->bottom() ||
        si->_capacity != space->capacity()) {
      fail_continue("Shared space base address does not match.");
      return false;
    }
  }

  bool result = (map_region(i, rs) != NULL);
  if (space != NULL && result) {
    space->set_top((HeapWord*)(si->_base + si->_used));
    space->set_saved_mark();
  }
  return result;
}
/* Compute the region for which this operation is defined. */
static GeglRectangle
get_bounding_box (GeglOperation *operation)
{
  GeglRectangle  result  = { 0, 0, 0, 0 };
  GeglRectangle *in_rect =
    gegl_operation_source_get_bounding_box (operation, "input");
  GeglChantO    *area    = GEGL_CHANT_PROPERTIES (operation);

#ifdef TRACE
  g_warning ("> get_bounding_box pointer == 0x%x in_rect == 0x%x",
             area->lens_info_pointer, in_rect);
#endif

  if (in_rect && area->lens_info_pointer)
    result = map_region (in_rect, area->lens_info_pointer, find_dst_pixel);

#ifdef TRACE
  g_warning ("< get_bounding_box result = %dx%d+%d+%d",
             result.width, result.height, result.x, result.y);
#endif

  return result;
}
/* Compute the input rectangle required to compute the specified region
 * of interest (roi). */
static GeglRectangle
get_required_for_output (GeglOperation       *operation,
                         const gchar         *input_pad,
                         const GeglRectangle *roi)
{
  GeglChantO    *area = GEGL_CHANT_PROPERTIES (operation);
  GeglRectangle  result =
    *gegl_operation_source_get_bounding_box (operation, "input");

#ifdef TRACE
  g_warning ("> get_required_for_output src=%dx%d+%d+%d",
             result.width, result.height, result.x, result.y);
  if (roi)
    g_warning ("  ROI == %dx%d+%d+%d",
               roi->width, roi->height, roi->x, roi->y);
#endif

  if (roi && area->lens_info_pointer)
    {
      result = map_region (roi, area->lens_info_pointer, find_src_pixel);
      result.width++;
      result.height++;
    }

#ifdef TRACE
  g_warning ("< get_required_for_output res=%dx%d+%d+%d",
             result.width, result.height, result.x, result.y);
#endif

  return result;
}
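/*
 * Both functions above push a rectangle through a per-pixel coordinate
 * transform: forward (find_dst_pixel) to get the output bounds, inverse
 * (find_src_pixel) to get the input needed for a given roi. A minimal
 * standalone sketch of that idea, walking the rectangle's border through
 * the transform and accumulating a bounding box. rect_t and map_fn are
 * hypothetical types, not the GEGL ones; the full border is walked, not
 * just the corners, because a lens distortion is not affine:
 */
#include <math.h>

typedef struct { int x, y, width, height; } rect_t;
typedef void (*map_fn) (double x, double y, double *ox, double *oy);

static void
extend (double *x0, double *y0, double *x1, double *y1, double x, double y)
{
  if (x < *x0) *x0 = x;
  if (y < *y0) *y0 = y;
  if (x > *x1) *x1 = x;
  if (y > *y1) *y1 = y;
}

/* Map every border pixel of *in through the transform and return the
 * integer bounding box of the results. */
static rect_t
map_rect_bounds (const rect_t *in, map_fn map)
{
  double x0 = 1e30, y0 = 1e30, x1 = -1e30, y1 = -1e30;
  double ox, oy;
  int    i;

  for (i = 0; i <= in->width; i++)          /* top and bottom edges */
    {
      map (in->x + i, in->y, &ox, &oy);
      extend (&x0, &y0, &x1, &y1, ox, oy);
      map (in->x + i, in->y + in->height, &ox, &oy);
      extend (&x0, &y0, &x1, &y1, ox, oy);
    }
  for (i = 0; i <= in->height; i++)         /* left and right edges */
    {
      map (in->x, in->y + i, &ox, &oy);
      extend (&x0, &y0, &x1, &y1, ox, oy);
      map (in->x + in->width, in->y + i, &ox, &oy);
      extend (&x0, &y0, &x1, &y1, ox, oy);
    }

  rect_t out = { (int) floor (x0), (int) floor (y0),
                 (int) ceil (x1 - x0), (int) ceil (y1 - y0) };
  return out;
}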
/* Test basic operations */
static void test01(struct sb *sb, struct inode *inode)
{
	/*
	 * FIXME: map_region() does not support reading segments that
	 * span multiple leaves in one call.
	 */
#define CAN_HANDLE_A_LEAF	1

	/* Create in ascending order */
	if (test_start("test01.1")) {
		struct block_segment seg;
		int err, segs;

		/* Set fake backend mark to modify backend objects. */
		tux3_start_backend(sb);

		for (int i = 0, j = 0; i < 30; i++, j++) {
			segs = d_map_region(inode, 2*i, 1, &seg, 1, MAP_WRITE);
			test_assert(segs == 1);
		}
#ifdef CAN_HANDLE_A_LEAF
		for (int i = 0; i < 30; i++) {
			segs = check_map_region(inode, 2*i, 1, &seg, 1);
			test_assert(segs == 1);
		}
#else
		segs = check_map_region(inode, 0, 30*2, seg, ARRAY_SIZE(seg));
		test_assert(segs == 30*2);
#endif
		/* btree_chop and dleaf_chop test */
		int index = 31*2;
		while (index--) {
			err = btree_chop(&tux_inode(inode)->btree, index,
					 TUXKEY_LIMIT);
			test_assert(!err);
#ifdef CAN_HANDLE_A_LEAF
			for (int i = 0; i < 30; i++) {
				if (index <= i*2)
					break;
				segs = check_map_region(inode, 2*i, 1, &seg, 1);
				test_assert(segs == 1);
			}
#else
			segs = check_map_region(inode, 0, 30*2, seg,
						ARRAY_SIZE(seg));
			test_assert(segs == i*2);
#endif
		}

		/* Check that everything was truncated */
		segs = map_region(inode, 0, INT_MAX, &seg, 1, MAP_READ);
		test_assert(segs == 1);
		test_assert(seg.count == INT_MAX);
		test_assert(seg.state == BLOCK_SEG_HOLE);

		tux3_end_backend();

		test_assert(force_delta(sb) == 0);
		clean_main(sb, inode);
	}
	test_end();

	/* Create in descending order */
	if (test_start("test01.2")) {
		struct block_segment seg;
		int err, segs;

		/* Set fake backend mark to modify backend objects. */
		tux3_start_backend(sb);

		for (int i = 30; i >= 0; i--) {
			segs = d_map_region(inode, 2*i, 1, &seg, 1, MAP_WRITE);
			test_assert(segs == 1);
		}
#ifdef CAN_HANDLE_A_LEAF
		for (int i = 30; i >= 0; i--) {
			segs = check_map_region(inode, 2*i, 1, &seg, 1);
			test_assert(segs == 1);
		}
#else
		segs = check_map_region(inode, 0, 30*2, seg, ARRAY_SIZE(seg));
		test_assert(segs == i*2);
#endif
		err = btree_chop(&tux_inode(inode)->btree, 0, TUXKEY_LIMIT);
		test_assert(!err);

		/* Check that everything was truncated */
		segs = map_region(inode, 0, INT_MAX, &seg, 1, MAP_READ);
		test_assert(segs == 1);
		test_assert(seg.count == INT_MAX);
		test_assert(seg.state == BLOCK_SEG_HOLE);

		tux3_end_backend();

		test_assert(force_delta(sb) == 0);
		clean_main(sb, inode);
	}
	test_end();

	test_assert(force_delta(sb) == 0);
	clean_main(sb, inode);
}
/*
 * Returns the starting address in the server that is mapped to the
 * range of user addresses specified by task, user_addr and size. The
 * address range will be mapped into the server and entered into the share
 * cache by this call if it is not already. If part of it is mapped, but
 * not all of it, the mapped range will be extended into contiguous server
 * addresses. Neither the input range nor the returned starting address
 * is necessarily page aligned.
 *
 * If successful, the region of memory is locked and an identifier of the
 * region (for use as an arg to user_memory_unlock_region) is returned
 * through the region_id_p ptr; otherwise NULL is returned.
 */
user_memory_t
user_memory_slow_lookup(
	struct task_struct	*task,
	vm_address_t		user_addr,
	vm_size_t		size,
	vm_prot_t		prot,
	vm_address_t		*svr_addrp)
{
	vm_address_t		pagenum;
	int			pageoffset;
	user_memory_t		region_p;
	struct user_memory_bucket *bucket;

	debug(0, user_memory_ave_size =
		 (user_memory_ave_size * user_memory_num_lookups + size) /
		 (user_memory_num_lookups + 1));
	debug(0, ++user_memory_num_lookups);
	debug(1, if (user_memory_num_lookups % 100 == 0)
			user_memory_statistics());

	pagenum = ADDR_TO_PAGENUM(user_addr);
	pageoffset = user_addr - PAGENUM_TO_ADDR(pagenum);

	bucket = &user_memory_hash[USER_MEMORY_HASH(task, pagenum)];
	for (region_p = bucket->chain;
	     region_p;
	     region_p = region_p->next_inchain) {
		if (region_p->task == task && region_p->user_page == pagenum)
			break;
	}

	if (!region_p) {
		/*
		 * It's not in the cache
		 */
		if ((region_p = map_region(task, user_addr,
					   size, prot)) != NULL) {
			user_memory_insert(region_p);
			*svr_addrp = region_p->svr_addr + pageoffset;
		} else
			*svr_addrp = 0;
	} else {
		/*
		 * NOTE: Block must be locked before releasing list lock
		 */
		if (region_p->size < pageoffset + size) {
			/*
			 * Extend the region to include user_addr + size
			 */
			if (!map_region_bigger(region_p,
					       pageoffset + size, prot)) {
				*svr_addrp = 0;
				return NULL;
			}
		}
		if (!region_p->is_fresh) {
			/*
			 * Put it at the beginning of the mru list
			 */
			delete_mru(region_p);
			insert_mru(region_p);
		}
		*svr_addrp = region_p->svr_addr + pageoffset;
	}
	return region_p;
}
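/*
 * The lookup above is a classic hash-bucket cache with MRU promotion:
 * probe the chain for (task, page) and, on a miss, create and insert the
 * entry; on a hit, move it to the front of the recency list. A minimal
 * standalone sketch of that pattern (entry_t, NBUCKETS, lookup_or_insert
 * and the folded key are hypothetical, not the Mach server's types):
 */
#include <stdint.h>
#include <stdlib.h>

#define NBUCKETS 64

typedef struct entry {
	uintptr_t	key;		/* e.g. (task, page) folded together */
	struct entry	*next_inchain;	/* hash-bucket chain */
	struct entry	*mru_next;	/* recency list, head = most recent */
} entry_t;

static entry_t *buckets[NBUCKETS];
static entry_t *mru_head;

static entry_t *lookup_or_insert(uintptr_t key)
{
	entry_t **chain = &buckets[key % NBUCKETS];
	entry_t *e;

	for (e = *chain; e; e = e->next_inchain)
		if (e->key == key)
			break;

	if (!e) {			/* miss: create and insert */
		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->key = key;
		e->next_inchain = *chain;
		*chain = e;
	} else {			/* hit: unlink from recency list */
		entry_t **pp = &mru_head;
		while (*pp && *pp != e)
			pp = &(*pp)->mru_next;
		if (*pp)
			*pp = e->mru_next;
	}
	/* Either way, the entry becomes most recently used. */
	e->mru_next = mru_head;
	mru_head = e;
	return e;
}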
/* Map a single page (0x1000 bytes) of the hardware address space. */
void *map_page(uint32_t hw_addr)
{
	return map_region(hw_addr, 0x1000);
}
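/*
 * A hedged sketch of what a map_region() like the one above might look
 * like in Linux user space, built on /dev/mem. The name
 * map_region_devmem and the /dev/mem approach are illustrative
 * assumptions, not the original implementation (which could equally be
 * a kernel-side ioremap() or similar):
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

static void *map_region_devmem(uint32_t hw_addr, size_t len)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0)
		return NULL;

	/*
	 * mmap offsets must be page aligned; map the containing pages and
	 * return a pointer adjusted forward to hw_addr.
	 */
	uint32_t page_mask = (uint32_t) sysconf(_SC_PAGESIZE) - 1;
	uint32_t base      = hw_addr & ~page_mask;
	size_t   span      = (hw_addr - base) + len;

	void *p = mmap(NULL, span, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, (off_t) base);
	close(fd);	/* the mapping survives the close */
	if (p == MAP_FAILED)
		return NULL;
	return (char *) p + (hw_addr - base);
}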