/* Wire down all memory currently allocated at START for LEN bytes;
   HOST_PRIV is the privileged host port required by vm_wire.
   Walks the task's regions with vm_region, faults each page in, wires
   it, and restores the region's original protection.  Errors from the
   individual Mach calls are deliberately ignored (best effort), but an
   error from vm_region itself terminates the walk.  */
static void
wire_segment_internal (vm_address_t start,
		       vm_size_t len,
		       host_priv_t host_priv)
{
  vm_address_t addr;
  vm_size_t size;
  vm_prot_t protection;
  vm_prot_t max_protection;
  vm_inherit_t inheritance;
  boolean_t shared;
  mach_port_t object_name;
  vm_offset_t offset;
  error_t err;
  volatile char *poke;

  do
    {
      /* vm_region rounds ADDR up to the base of the region containing
	 it, or to the next region above it if ADDR is unmapped.  */
      addr = start;
      err = vm_region (mach_task_self (), &addr, &size,
		       &protection, &max_protection,
		       &inheritance, &shared, &object_name, &offset);
      if (err)
	return;

      /* BUGFIX: if the region found starts at or beyond START + LEN,
	 nothing of the requested range remains mapped.  The previous
	 code fell through and computed LEN - (ADDR - START), which
	 underflows the unsigned vm_size_t and yields an enormous SIZE
	 for the calls below.  Stop instead.  */
      if (addr >= start + len)
	return;

      /* The current region begins at ADDR and is SIZE long.  If it
	 extends beyond the LEN, prune it.  */
      if (addr + size > start + len)
	size = len - (addr - start);

      /* Raise protection to allow all access possible, so the faults
	 generated below cannot hit a protection violation.  */
      vm_protect (mach_task_self (), addr, size, 0, max_protection);

      /* Touch every page to fault it in.  BUGFIX: only write when the
	 region's maximum protection permits writing; the old
	 unconditional `*poke = *poke' would fault fatally on a region
	 whose max protection is read-only.  A volatile read still
	 generates the fault needed to bring the page in.  */
      for (poke = (char *) addr;
	   (vm_address_t) poke < addr + size;
	   poke += vm_page_size)
	{
	  if (max_protection & VM_PROT_WRITE)
	    *poke = *poke;	/* write fault */
	  else
	    (void) *poke;	/* read fault */
	}

      /* Wire the pages down.  */
      vm_wire (host_priv, mach_task_self (), addr, size, max_protection);

      /* Set protection back to what it was.  */
      vm_protect (mach_task_self (), addr, size, 0, protection);

      /* vm_region handed us a send right to the region's name port;
	 release it so the loop does not leak one right per region.  */
      mach_port_deallocate (mach_task_self (), object_name);

      /* Advance past this region; any gap between START and ADDR is
	 unmapped and counts as consumed.  */
      len -= (addr - start) + size;
      start = addr + size;
    }
  while (len);
}
/*
 * Mirror the Linux vm_area VMP of address space MM into the underlying
 * Mach task: pick a memory object for the mapping, vm_map() it at the
 * area's address with protections derived from vm_flags, and wire it
 * if the area is VM_LOCKED.  Failures are reported via printk/panic;
 * the function returns no status.
 */
void
osfmach3_insert_vm_struct(
	struct mm_struct	*mm,
	struct vm_area_struct	*vmp)
{
	memory_object_t		mem_obj;
	vm_offset_t		mem_obj_offset;
	kern_return_t		kr;
	unsigned short		vm_flags;
	boolean_t		is_shared;
	vm_prot_t		cur_prot, max_prot;
	vm_address_t		user_addr, wanted_addr;
	vm_size_t		size;
	unsigned int		id;
	struct shmid_ds		*shp;
	struct osfmach3_mach_task_struct *mach_task;
	extern struct shmid_ds	*shm_segs[SHMMNI];

	if (vmp->vm_flags & VM_REMAPPING) {
		/* don't mess with Mach VM: it's only Linux remappings */
		return;
	}

#ifdef VMA_DEBUG
	if (vma_debug) {
		printk("VMA:osfmach3_insert_vm_struct: mm=0x%p, vmp=0x%p\n",
		       mm, vmp);
	}
#endif	/* VMA_DEBUG */

	mach_task = mm->mm_mach_task;

	/*
	 * Select the Mach memory object backing this mapping:
	 *  - no inode + vm_pte set: SysV shared memory; the segment's
	 *    shm_pages field holds the object port (presumably set up
	 *    by the shm code — NOTE(review): confirm against shm.c).
	 *  - no inode, no vm_pte:   anonymous memory (null object).
	 *  - regular file inode:    ask the inode pager for an object.
	 *  - other inode with a pager already attached: reuse it.
	 *  - anything else:         unsupported; panic.
	 */
	if (vmp->vm_inode == NULL) {
		if (vmp->vm_pte != 0) {
			/* shared memory */
			id = SWP_OFFSET(vmp->vm_pte) & SHM_ID_MASK;
			shp = shm_segs[id];
			if (shp != IPC_UNUSED) {
				mem_obj = (mach_port_t) shp->shm_pages;
				mem_obj_offset = 0;
			} else {
				mem_obj = MEMORY_OBJECT_NULL;
				mem_obj_offset = 0;
			}
		} else {
			mem_obj = MEMORY_OBJECT_NULL;
			mem_obj_offset = 0;
		}
	} else if (S_ISREG(vmp->vm_inode->i_mode)) {
		mem_obj = inode_pager_setup(vmp->vm_inode);
		if (mem_obj == MEMORY_OBJECT_NULL) {
			panic("osfmach3_insert_vm_struct: can't setup pager");
		}
		mem_obj_offset = (vm_offset_t) vmp->vm_offset;
	} else if (vmp->vm_inode->i_mem_object != NULL) {
		/* special file, but with a pager already setup */
		mem_obj = vmp->vm_inode->i_mem_object->imo_mem_obj;
		mem_obj_offset = (vm_offset_t) vmp->vm_offset;
	} else {
		panic("osfmach3_insert_vm_struct: non-regular file");
	}

	/* Translate Linux VM_READ/WRITE/EXEC bits into a Mach
	   protection; the maximum protection is left fully open.  */
	vm_flags = vmp->vm_flags;
	cur_prot = VM_PROT_NONE;
	if (vm_flags & VM_READ)
		cur_prot |= VM_PROT_READ;
	if (vm_flags & VM_WRITE)
		cur_prot |= VM_PROT_WRITE;
	if (vm_flags & VM_EXEC)
		cur_prot |= VM_PROT_EXECUTE;
	max_prot = VM_PROT_ALL;
	is_shared = (vmp->vm_flags & VM_SHARED) != 0;
	user_addr = vmp->vm_start;
	wanted_addr = user_addr;
	size = vmp->vm_end - vmp->vm_start;

#ifdef VMA_DEBUG
	if (vma_debug) {
		printk("VMA: vm_map(task=0x%x, user_addr=0x%x, size=0x%x, "
		       "mem_obj=0x%x, offset=0x%x, %sCOPY, cur_prot=0x%x, "
		       "max_prot=0x%x, %s)\n",
		       mach_task->mach_task_port, user_addr, size,
		       mem_obj, mem_obj_offset, is_shared ? "!" : "",
		       cur_prot, max_prot,
		       is_shared ? "INHERIT_SHARE" : "INHERIT_COPY");
	}
#endif	/* VMA_DEBUG */

	/* Map at exactly vm_start (anywhere == FALSE).  A shared
	   mapping maps the object directly and is inherited shared; a
	   private one gets a COPY of the object.  The blocking calls
	   bracket the RPC so the server thread can block in Mach.  */
	server_thread_blocking(FALSE);
	kr = vm_map(mach_task->mach_task_port,
		    &user_addr,
		    size,
		    0,		/* no mask */
		    FALSE,	/* not anywhere */
		    mem_obj,
		    mem_obj_offset,
		    !is_shared,
		    cur_prot,
		    max_prot,
		    is_shared ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
	server_thread_unblocking(FALSE);

	/* NOTE(review): on failure (or a mapping at the wrong address)
	   the code only logs and falls through; the vm_wire below will
	   then operate on an address that was never mapped.  Presumably
	   intentional best-effort behavior — confirm before changing.  */
	if (kr != KERN_SUCCESS) {
		printk("Failure: vm_map(task=0x%x, user_addr=0x%x, size=0x%x, "
		       "mem_obj=0x%x, offset=0x%x, %sCOPY, cur_prot=0x%x, "
		       "max_prot=0x%x, %s)\n",
		       mach_task->mach_task_port, user_addr, size,
		       mem_obj, mem_obj_offset, is_shared ? "!" : "",
		       cur_prot, max_prot,
		       is_shared ? "INHERIT_SHARE" : "INHERIT_COPY");
		MACH3_DEBUG(1, kr, ("osfmach3_insert_vm_struct: vm_map"));
		printk("osfmach3_insert_vm_struct: can't map\n");
	}
	if (user_addr != wanted_addr) {
		printk("vm_map: mapped at 0x%x instead of 0x%x\n",
		       user_addr, wanted_addr);
		printk("osfmach3_insert_vm_struct: mapping at wrong address\n");
	}

	/* mlock()ed areas are wired in the Mach task using the
	   privileged host port.  */
	if (vmp->vm_flags & VM_LOCKED) {
		extern mach_port_t privileged_host_port;

		server_thread_blocking(FALSE);
		kr = vm_wire(privileged_host_port,
			     mach_task->mach_task_port,
			     user_addr,
			     size,
			     cur_prot);
		server_thread_unblocking(FALSE);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr,
				    ("osfmach3_insert_vm_struct: "
				     "vm_wire(task=0x%x, addr=0x%x, size=0x%x, "
				     "prot=0x%x)",
				     mach_task->mach_task_port,
				     user_addr, size, cur_prot));
			printk("osfmach3_insert_vm_struct: vm_wire failed\n");
		}
	}

#if 0
	if (vmp->vm_inode != NULL) {
		/*
		 * If mem_obj was already cached in the kernel, we got an
		 * extra reference on its i_mem_object structure (inode_pager).
		 * If it was the first time we mapped the inode, the memory
		 * object has just been initialized by the kernel and we
		 * got a reference in memory_object_init(). In both cases,
		 * we have to release a reference.
		 */
		ASSERT(mem_obj != MEMORY_OBJECT_NULL);
		ASSERT(vmp->vm_inode->i_mem_object);
		ASSERT(vmp->vm_inode->i_mem_object->imo_mem_obj_control);
		inode_pager_release(vmp->vm_inode);
	}
#endif
}