Example #1
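This routine appears to come from the Mach VM layer of Apple's XNU kernel. It is the MACH_VM_DEBUG implementation of vm32_region_info_64: it looks up the map entry containing address (descending through submaps), fills in a vm_info_region_64_t describing that entry, then walks the backing VM object's shadow chain, copying one vm_info_object_t per object into an out-of-line buffer that is handed back to the caller as a vm_map_copy_t, with *objectsCntp receiving the object count.
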
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */
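	/*
	 * Each pass of this loop re-walks the map and the shadow chain,
	 * filling the buffer while counting objects; if the count exceeds
	 * the available room, a larger wired buffer is allocated and the
	 * walk is retried.
	 */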

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/*
			 * Move down to the lower map: lock the submap
			 * before unlocking the current map, so the
			 * traversal always holds at least one lock.
			 */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
					 VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
					  VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
				  addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
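
The routine above is compiled in only on MACH_VM_DEBUG kernels, so it is rarely reachable in practice. For comparison, a much less detailed view of a region is available to ordinary user programs through mach_vm_region. The following is a minimal user-space sketch, not part of the example above:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
	mach_vm_address_t addr = 0;	/* start scanning at address 0 */
	mach_vm_size_t size = 0;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t object_name = MACH_PORT_NULL;	/* unused, but required */

	/* find the first mapped region at or above addr */
	kern_return_t kr = mach_vm_region(mach_task_self(), &addr, &size,
	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
	    &count, &object_name);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_vm_region: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("first region: 0x%llx-0x%llx prot %d max %d\n",
	    (unsigned long long)addr, (unsigned long long)(addr + size),
	    info.protection, info.max_protection);
	return 0;
}

Note that mach_vm_region reports only per-region basic info; the per-object shadow-chain details come from the debug interface shown above.
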
Example #2
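This function is from XNU's commpage setup code. It allocates and wires the commpage in the kernel map, marks the backing VM object MEMORY_OBJECT_COPY_NONE so a user mapping can never trigger a delayed copy (which would break the kernel's wired mapping), wraps the memory in a memory entry, maps that entry at offset 0 of the shared 32- or 64-bit commpage submap, and finally drops execute permission from the kernel mapping.
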
static void*
commpage_allocate(
	vm_map_t	submap,			// commpage32_map or commpage_map64
	size_t		area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	vm_prot_t	uperm)
{
	vm_offset_t	kernel_addr = 0;	// address of commpage in kernel map
	vm_offset_t	zero = 0;
	vm_size_t	size = area_used;	// size actually populated
	vm_map_entry_t	entry;
	ipc_port_t	handle;
	kern_return_t	kr;

	if (submap == NULL)
		panic("commpage submap is null");

	if ((kr = vm_map(kernel_map,
			 &kernel_addr,
			 area_used,
			 0,
			 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
			 NULL,
			 0,
			 FALSE,
			 VM_PROT_ALL,
			 VM_PROT_ALL,
			 VM_INHERIT_NONE)))
		panic("cannot allocate commpage %d", kr);

	if ((kr = vm_map_wire(kernel_map,
			      kernel_addr,
			      kernel_addr+area_used,
			      VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
			      FALSE)))
		panic("cannot wire commpage: %d", kr);

	/* 
	 * Now that the object is created and wired into the kernel map, mark it so that no delay
	 * copy-on-write will ever be performed on it as a result of mapping it into user-space.
	 * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
	 * that would be a real disaster.
	 *
	 * JMM - What we really need is a way to create it like this in the first place.
	 */
	if (!vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)
		panic("cannot find commpage entry");
	VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	if ((kr = mach_make_memory_entry( kernel_map,		// target map
				    &size,		// size 
				    kernel_addr,	// offset (address in kernel map)
				    uperm,	// protections as specified
				    &handle,		// this is the object handle we get
				    NULL )))		// parent_entry: none (not deriving from an existing entry)
		panic("cannot make entry for commpage %d", kr);

	if ((kr = vm_map_64(	submap,				// target map (shared submap)
			&zero,				// address (map into 1st page in submap)
			area_used,			// size
			0,				// mask
			VM_FLAGS_FIXED,			// flags (it must be 1st page in submap)
			handle,				// port is the memory entry we just made
			0,                              // offset (map 1st page in memory entry)
			FALSE,                          // copy
			uperm,   // cur_protection (R-only in user map)
			uperm,   // max_protection
		        VM_INHERIT_SHARE )))             // inheritance
		panic("cannot map commpage %d", kr);

	ipc_port_release(handle);
	/* Make the kernel mapping non-executable. This cannot be done
	 * at the time of map entry creation as mach_make_memory_entry
	 * cannot handle disjoint permissions at this time.
	 */
	kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	assert (kr == KERN_SUCCESS);

	return (void*)(intptr_t)kernel_addr;                     // return address in kernel map
}
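
A call site would look roughly like the sketch below, based on the parameter comments above: commpage32_map and _COMM_PAGE32_AREA_USED are the kernel-defined submap and size named in those comments, and commPagePtr32 is a hypothetical variable holding the kernel-side pointer.

/* Sketch of a call site (not part of the example above): map the
 * 32-bit commpage read-only into its shared submap and keep the
 * kernel-map address for populating its contents.
 */
static char *commPagePtr32;

static void
commpage_setup_sketch(void)
{
	commPagePtr32 = (char *) commpage_allocate(commpage32_map,
	    (size_t) _COMM_PAGE32_AREA_USED, VM_PROT_READ);
	/* ... write commpage data through commPagePtr32 ... */
}

Because uperm is passed as both cur_protection and max_protection in the vm_map_64 call, a commpage mapped read-only can never be upgraded to writable from user space.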