Example #1
static void
link_elf_unload_file(linker_file_t file)
{
	elf_file_t ef = (elf_file_t) file;
	int i;

	/* Notify MD code that a module is being unloaded. */
	elf_cpu_unload_file(file);

	if (ef->progtab) {
		for (i = 0; i < ef->nprogtab; i++) {
			if (ef->progtab[i].size == 0)
				continue;
			if (ef->progtab[i].name == NULL)
				continue;
			if (!strcmp(ef->progtab[i].name, DPCPU_SETNAME))
				dpcpu_free(ef->progtab[i].addr,
				    ef->progtab[i].size);
#ifdef VIMAGE
			else if (!strcmp(ef->progtab[i].name, VNET_SETNAME))
				vnet_data_free(ef->progtab[i].addr,
				    ef->progtab[i].size);
#endif
		}
	}
	if (ef->preloaded) {
		free(ef->reltab, M_LINKER);
		free(ef->relatab, M_LINKER);
		free(ef->progtab, M_LINKER);
		free(ef->ctftab, M_LINKER);
		free(ef->ctfoff, M_LINKER);
		free(ef->typoff, M_LINKER);
		if (file->filename != NULL)
			preload_delete_name(file->filename);
		/* XXX reclaim module memory? */
		return;
	}

	for (i = 0; i < ef->nreltab; i++)
		free(ef->reltab[i].rel, M_LINKER);
	for (i = 0; i < ef->nrelatab; i++)
		free(ef->relatab[i].rela, M_LINKER);
	free(ef->reltab, M_LINKER);
	free(ef->relatab, M_LINKER);
	free(ef->progtab, M_LINKER);

	if (ef->object) {
		vm_map_remove(kernel_map, (vm_offset_t) ef->address,
		    (vm_offset_t) ef->address +
		    (ef->object->size << PAGE_SHIFT));
	}
	free(ef->e_shdr, M_LINKER);
	free(ef->ddbsymtab, M_LINKER);
	free(ef->ddbstrtab, M_LINKER);
	free(ef->shstrtab, M_LINKER);
	free(ef->ctftab, M_LINKER);
	free(ef->ctfoff, M_LINKER);
	free(ef->typoff, M_LINKER);
}
Example #2
/*
 *	stack_collect:
 *
 *	Free excess kernel stacks, may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t		stack;
		spl_t			s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size+(2*PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
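The block comment inside stack_collect() above explains the removal range: the reservation made by stack_alloc() begins one guard page below the stack base and includes a guard page on each end. The helper below is an illustrative restatement of that arithmetic, not part of the original source; it assumes the same kernel_stack_size and PAGE_SIZE globals used above.

/*
 * Illustrative only: compute the bounds of the full reservation backing a
 * kernel stack with the given page-aligned base.  The reservation starts
 * one page below the base (the lower guard page) and spans the stack plus
 * the guard pages on both ends.
 */
static void
stack_reservation_bounds(vm_offset_t stack_base,
			 vm_offset_t *startp, vm_offset_t *endp)
{
	*startp = stack_base - PAGE_SIZE;
	*endp   = *startp + kernel_stack_size + (2 * PAGE_SIZE);
}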
Example #3
void
kmem_free(
	vm_map_t 	map,
	vm_offset_t 	addr,
	vm_size_t 	size)
{
	kern_return_t kr;

	kr = vm_map_remove(map, trunc_page(addr), round_page(addr + size));
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
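kmem_free() rounds the range to page boundaries and panics if vm_map_remove() fails, so callers treat it as infallible. Below is a minimal, hypothetical caller; the demo function and the Mach-style kmem_alloc(map, &addr, size) counterpart are assumptions for illustration, not taken from the example above.

/*
 * Hypothetical usage sketch: allocate a wired scratch buffer from
 * kernel_map and release it with kmem_free().
 */
kern_return_t
scratch_buffer_demo(vm_size_t size)
{
	vm_offset_t addr;
	kern_return_t kr;

	kr = kmem_alloc(kernel_map, &addr, size);	/* assumed allocator */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the buffer at addr ... */

	kmem_free(kernel_map, addr, size);
	return KERN_SUCCESS;
}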
Example #4
/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_LOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_UNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_UNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size,
	    VMFS_ALIGNED_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_LOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_UNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}
Example #5
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    size_t      cbAllocated = cb;
    PRTMEMHDR   pHdr        = NULL;

#ifdef RT_ARCH_AMD64
    /*
     * Things are a bit more complicated on AMD64 for executable memory
     * because we need to be in the ~2GB..~0 range for code.
     */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
            return VERR_NOT_SUPPORTED;

# ifdef USE_KMEM_ALLOC_PROT
        pHdr = (PRTMEMHDR)kmem_alloc_prot(kernel_map, cb + sizeof(*pHdr),
                                          VM_PROT_ALL, VM_PROT_ALL, KERNBASE);
# else
        vm_object_t pVmObject = NULL;
        vm_offset_t Addr = KERNBASE;
        cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE);

        pVmObject = vm_object_allocate(OBJT_DEFAULT, cbAllocated >> PAGE_SHIFT);
        if (!pVmObject)
            return VERR_NO_EXEC_MEMORY;

        /* Addr is the address at which vm_map_find will start searching for suitable space. */
        int rc = vm_map_find(kernel_map, pVmObject, 0, &Addr,
                             cbAllocated, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (rc == KERN_SUCCESS)
        {
            rc = vm_map_wire(kernel_map, Addr, Addr + cbAllocated,
                             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            if (rc == KERN_SUCCESS)
            {
                pHdr = (PRTMEMHDR)Addr;

                if (fFlags & RTMEMHDR_FLAG_ZEROED)
                    bzero(pHdr, cbAllocated);
            }
            else
                vm_map_remove(kernel_map,
                              Addr,
                              Addr + cbAllocated);
        }
        else
            vm_object_deallocate(pVmObject);
# endif
    }
    else
#endif
    {
Example #6
/*
 * munmap system call handler
 *
 * munmap_args(void *addr, size_t len)
 *
 * No requirements
 */
int
sys_munmap(struct munmap_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/* map->token serializes between the map check and the actual unmap */
	lwkt_gettoken(&map->token);

	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size,
				     VM_PROT_NONE, FALSE)) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_remove(map, addr, addr + size);
	lwkt_reltoken(&map->token);
	return (0);
}
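The rounding and wrap checks at the top of sys_munmap() recur throughout these examples. The hypothetical helper below isolates that pattern; the name and exact structure are illustrative (the original keeps the end-address check in a separate temporary so the compiler cannot optimize it away).

/*
 * Illustrative helper: page-align a user-supplied (addr, len) pair and
 * reject ranges whose length or end address wraps around.
 */
static int
page_align_range(vm_offset_t addr, vm_size_t len,
		 vm_offset_t *basep, vm_size_t *sizep)
{
	vm_size_t pageoff = addr & PAGE_MASK;
	vm_offset_t end;
	vm_size_t size;

	addr -= pageoff;
	size = (vm_size_t)round_page(len + pageoff);
	if (size < len)			/* length wrapped */
		return (EINVAL);
	end = addr + size;		/* discrete statement, as in sys_munmap() */
	if (end < addr)			/* end address wrapped */
		return (EINVAL);

	*basep = addr;
	*sizep = size;
	return (0);
}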
Example #7
void
kmem_io_map_deallocate(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	/*
	 *	Remove the mappings.  The pmap_remove is needed.
	 */
	
	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size);
}
Example #8
static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
                                        vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
{
    vm_offset_t MapAddress = vm_map_min(kernel_map);
    size_t      cPages = atop(pMemFreeBSD->Core.cb);
    int         rc;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);

    /* No additional object reference for auto-deallocation upon unmapping. */
#if __FreeBSD_version >= 1000055
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#else
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#endif

    if (rc == KERN_SUCCESS)
    {
        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
                                              false, rcNoMem);
        if (RT_SUCCESS(rc))
        {
            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            return VINF_SUCCESS;
        }

        vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
    }
    else
    {
        rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
        vm_object_deallocate(pMemFreeBSD->pObject);
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}
Example #9
/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown.
 */
static
void
shutdown_cleanup_proc(struct proc *p)
{
	struct filedesc *fdp;
	struct vmspace *vm;

	if (p == NULL)
		return;
	if ((fdp = p->p_fd) != NULL) {
		kern_closefrom(0);
		if (fdp->fd_cdir) {
			cache_drop(&fdp->fd_ncdir);
			vrele(fdp->fd_cdir);
			fdp->fd_cdir = NULL;
		}
		if (fdp->fd_rdir) {
			cache_drop(&fdp->fd_nrdir);
			vrele(fdp->fd_rdir);
			fdp->fd_rdir = NULL;
		}
		if (fdp->fd_jdir) {
			cache_drop(&fdp->fd_njdir);
			vrele(fdp->fd_jdir);
			fdp->fd_jdir = NULL;
		}
	}
	if (p->p_vkernel)
		vkernel_exit(p);
	if (p->p_textvp) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}
	vm = p->p_vmspace;
	if (vm != NULL) {
		pmap_remove_pages(vmspace_pmap(vm),
				  VM_MIN_USER_ADDRESS,
				  VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map,
			      VM_MIN_USER_ADDRESS,
			      VM_MAX_USER_ADDRESS);
	}
}
Example #10
/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_LOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_UNLOCK(obj);
	return (0);
}
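Taken together, shm_map() (Example #4) and shm_unmap() above give kernel code temporary, wired access to a shared memory object's pages, with shm_kmappings tracking outstanding mappings. The consumer below is a hypothetical sketch; only the shm_map()/shm_unmap() calls come from the examples.

/*
 * Hypothetical consumer: map a DTYPE_SHM file's backing pages into the
 * kernel, touch them, and drop the mapping.  shm_unmap() must be passed
 * the exact address and size that shm_map() established.
 */
static int
shm_kernel_peek(struct file *fp, off_t offset, size_t len)
{
	void *kaddr;
	int error;

	error = shm_map(fp, len, offset, &kaddr);
	if (error != 0)
		return (error);

	/* ... read or write the wired kernel mapping at kaddr ... */

	error = shm_unmap(fp, kaddr, len);
	return (error);
}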
Example #11
static void
link_elf_unload_file(linker_file_t file)
{
    elf_file_t ef = file->priv;

    if (ef) {
#ifdef SPARSE_MAPPING
	if (ef->object) {
	    vm_map_remove(&kernel_map, (vm_offset_t) ef->address,
			  (vm_offset_t) ef->address
			  + (ef->object->size << PAGE_SHIFT));
	    vm_object_deallocate(ef->object);
	}
#else
	if (ef->address)
	    kfree(ef->address, M_LINKER);
#endif
	if (ef->symbase)
	    kfree(ef->symbase, M_LINKER);
	if (ef->strbase)
	    kfree(ef->strbase, M_LINKER);
	kfree(ef, M_LINKER);
    }
}
Example #12
static int
coff_load_file(struct thread *td, char *name)
{
	struct proc *p = td->td_proc;
  	struct vmspace *vmspace = p->p_vmspace;
  	int error;
  	struct nameidata nd;
  	struct vnode *vp;
  	struct vattr attr;
  	struct filehdr *fhdr;
  	struct aouthdr *ahdr;
  	struct scnhdr *scns;
  	char *ptr = 0;
  	int nscns;
  	unsigned long text_offset = 0, text_address = 0, text_size = 0;
  	unsigned long data_offset = 0, data_address = 0, data_size = 0;
  	unsigned long bss_size = 0;
  	int i;

	NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
	    UIO_SYSSPACE, name, td);

  	error = namei(&nd);
  	if (error)
    		return error;

  	vp = nd.ni_vp;
  	if (vp == NULL)
    		return ENOEXEC;

  	if (vp->v_writecount) {
    		error = ETXTBSY;
    		goto fail;
  	}

  	if ((error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
	    || ((attr.va_mode & 0111) == 0)
	    || (attr.va_type != VREG))
    		goto fail;

  	if (attr.va_size == 0) {
    		error = ENOEXEC;
    		goto fail;
  	}

  	if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
    		goto fail;

	/*
	 * Lose the lock on the vnode. It's no longer needed, and must not
	 * exist for the pagefault paging to work below.
	 */
	VOP_UNLOCK(vp, 0, td);

  	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &ptr,
			    PAGE_SIZE,
			    VM_PROT_READ,
		       	    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    0)) != 0)
		goto unlocked_fail;

  	fhdr = (struct filehdr *)ptr;

  	if (fhdr->f_magic != I386_COFF) {
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	nscns = fhdr->f_nscns;

  	if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
    		/*
     		 * XXX -- just fail.  I'm so lazy.
     		 */
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

  	scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
			  + sizeof(struct aouthdr));

  	for (i = 0; i < nscns; i++) {
    		if (scns[i].s_flags & STYP_NOLOAD)
      			continue;
    		else if (scns[i].s_flags & STYP_TEXT) {
      			text_address = scns[i].s_vaddr;
      			text_size = scns[i].s_size;
      			text_offset = scns[i].s_scnptr;
    		}
		else if (scns[i].s_flags & STYP_DATA) {
      			data_address = scns[i].s_vaddr;
      			data_size = scns[i].s_size;
      			data_offset = scns[i].s_scnptr;
    		} else if (scns[i].s_flags & STYP_BSS) {
      			bss_size = scns[i].s_size;
    		}
  	}

  	if ((error = load_coff_section(vmspace, vp, text_offset,
				      (caddr_t)(void *)(uintptr_t)text_address,
				      text_size, text_size,
				      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
    		goto dealloc_and_fail;
  	}
  	if ((error = load_coff_section(vmspace, vp, data_offset,
				      (caddr_t)(void *)(uintptr_t)data_address,
				      data_size + bss_size, data_size,
				      VM_PROT_ALL)) != 0) {
    		goto dealloc_and_fail;
  	}

  	error = 0;

 dealloc_and_fail:
	if (vm_map_remove(kernel_map,
			  (vm_offset_t) ptr,
			  (vm_offset_t) ptr + PAGE_SIZE))
    		panic("%s vm_map_remove failed", __func__);

 fail:
	VOP_UNLOCK(vp, 0, td);
 unlocked_fail:
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vrele(nd.ni_vp);
  	return error;
}
Example #13
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
Example #14
static void
link_elf_obj_unload_file(linker_file_t file)
{
	elf_file_t	ef = file->priv;
	int i;

	if (ef->progtab) {
		for (i = 0; i < ef->nprogtab; i++) {
			if (ef->progtab[i].size == 0)
				continue;
			if (ef->progtab[i].name == NULL)
				continue;
#if 0
			if (!strcmp(ef->progtab[i].name, "set_pcpu"))
				dpcpu_free(ef->progtab[i].addr,
				    ef->progtab[i].size);
#ifdef VIMAGE
			else if (!strcmp(ef->progtab[i].name, VNET_SETNAME))
				vnet_data_free(ef->progtab[i].addr,
				    ef->progtab[i].size);
#endif
#endif
		}
	}
	if (ef->preloaded) {
		if (ef->reltab)
			kfree(ef->reltab, M_LINKER);
		if (ef->relatab)
			kfree(ef->relatab, M_LINKER);
		if (ef->progtab)
			kfree(ef->progtab, M_LINKER);
		if (ef->ctftab)
			kfree(ef->ctftab, M_LINKER);
		if (ef->ctfoff)
			kfree(ef->ctfoff, M_LINKER);
		if (ef->typoff)
			kfree(ef->typoff, M_LINKER);
		if (file->filename != NULL)
			preload_delete_name(file->filename);
		kfree(ef, M_LINKER);
		/* XXX reclaim module memory? */
		return;
	}

	for (i = 0; i < ef->nreltab; i++)
		if (ef->reltab[i].rel)
			kfree(ef->reltab[i].rel, M_LINKER);
	for (i = 0; i < ef->nrelatab; i++)
		if (ef->relatab[i].rela)
			kfree(ef->relatab[i].rela, M_LINKER);
	if (ef->reltab)
		kfree(ef->reltab, M_LINKER);
	if (ef->relatab)
		kfree(ef->relatab, M_LINKER);
	if (ef->progtab)
		kfree(ef->progtab, M_LINKER);

	if (ef->object) {
#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
		vkernel_module_memory_free((vm_offset_t)ef->address, ef->bytes);
#else
		vm_map_remove(&kernel_map, (vm_offset_t) ef->address,
		    (vm_offset_t) ef->address +
		    (ef->object->size << PAGE_SHIFT));
#endif
		vm_object_deallocate(ef->object);
		ef->object = NULL;
	}
	if (ef->e_shdr)
		kfree(ef->e_shdr, M_LINKER);
	if (ef->ddbsymtab)
		kfree(ef->ddbsymtab, M_LINKER);
	if (ef->ddbstrtab)
		kfree(ef->ddbstrtab, M_LINKER);
	if (ef->shstrtab)
		kfree(ef->shstrtab, M_LINKER);
	if (ef->ctftab)
		kfree(ef->ctftab, M_LINKER);
	if (ef->ctfoff)
		kfree(ef->ctfoff, M_LINKER);
	if (ef->typoff)
		kfree(ef->typoff, M_LINKER);
	kfree(ef, M_LINKER);
}
Example #15
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be something already
     *	mapped to its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and
     *	copy it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}
Example #16
kern_return_t
kmem_alloc_contig(
	vm_map_t		map,
	vm_offset_t		*addrp,
	vm_size_t		size,
	vm_offset_t 		mask,
	ppnum_t			max_pnum,
	ppnum_t			pnum_mask,
	int 			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr; 
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
		return KERN_INVALID_ARGUMENT;
	
	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ? 
		        map_addr : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE)) 
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr), 
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);
	return KERN_SUCCESS;
}
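Note how both failure paths above tear the new entry back out with vm_map_remove() before dropping the extra object reference. The caller below is a hypothetical sketch; parameter values and the release path are illustrative only.

/*
 * Hypothetical caller: allocate a physically contiguous, wired buffer
 * below max_pnum and release it when finished.
 */
kern_return_t
contig_buffer_demo(vm_size_t size, ppnum_t max_pnum)
{
	vm_offset_t addr;
	kern_return_t kr;

	kr = kmem_alloc_contig(kernel_map, &addr, size,
			       0,		/* no extra alignment mask */
			       max_pnum,	/* highest acceptable page */
			       0,		/* pnum_mask */
			       0);		/* flags */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... hand the buffer to the device ... */

	vm_map_remove(kernel_map, vm_map_trunc_page(addr),
		      vm_map_round_page(addr + size), 0);
	return KERN_SUCCESS;
}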
Example #17
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            rc = vm_map_remove(kernel_map,
                                (vm_offset_t)pMemFreeBSD->Core.pv,
                                (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            vm_page_lock_queues();
            for (vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
                 pPage != NULL;
                 pPage = vm_page_next(pPage))
            {
                vm_page_unwire(pPage, 0);
            }
            vm_page_unlock_queues();
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            vm_object_deallocate(pMemFreeBSD->pObject);
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
Example #18
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc       *pProc            = (struct proc *)R0Process;
    struct vm_map     *pProcMap         = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t       ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    vm_offset_t AddrR3;
    if (R3PtrFixed == (RTR3PTR)-1)
    {
        /** @todo: is this needed?. */
        PROC_LOCK(pProc);
        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
        PROC_UNLOCK(pProc);
    }
    else
        AddrR3 = (vm_offset_t)R3PtrFixed;

    /* Insert the pObject in the map. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     0,                     /* Start offset in the object */
                     &AddrR3,               /* Start address IN/OUT */
                     pMemToMap->cb,         /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of the mapping */
#endif
                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                            /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
Example #19
/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 * 
 * No requirements
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t   esize;
	vm_size_t   align;
	int (*uksmap)(cdev_t dev, vm_page_t fake);
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;
	int error;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&map->token);
	
	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 * will optimize it out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&map->token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 * will optimize it out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 *
	 * Also align any large mapping (bigger than 16x SEG_SIZE) to a
	 * SEG_SIZE address boundary.
	 */
	if (flags & MAP_SIZEALIGN) {
		align = size;
		if ((align ^ (align - 1)) != (align << 1) - 1) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
	} else if ((flags & MAP_FIXED) == 0 &&
		   ((size & SEG_MASK) == 0 || size > SEG_SIZE * 16)) {
		align = SEG_SIZE;
	} else {
		align = PAGE_SIZE;
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	uksmap = NULL;

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&map->token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;

		/*
		 * Non-anonymous mappings of VCHR (aka not /dev/zero)
		 * cannot specify MAP_STACK or MAP_VPAGETABLE.
		 */
		if (vp->v_type == VCHR) {
			if (flags & (MAP_STACK | MAP_VPAGETABLE)) {
				lwkt_reltoken(&map->token);
				return(EINVAL);
			}
		}

		if (vp->v_type == VCHR && vp->v_rdev->si_ops->d_uksmap) {
			/*
			 * Device mappings without a VM object, typically
			 * sharing permanently allocated kernel memory or
			 * process-context-specific (per-process) data.
			 *
			 * Force them to be shared.
			 */
			uksmap = vp->v_rdev->si_ops->d_uksmap;
			object = NULL;
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			error = dev_dmmap_single(vp->v_rdev, &foff, objsize,
						&object, prot, NULL);

			if (error == ENODEV) {
				handle = (void *)(intptr_t)vp->v_rdev;
				object = dev_pager_alloc(handle, objsize, prot, foff);
				if (object == NULL) {
					lwkt_reltoken(&map->token);
					return(EINVAL);
				}
			} else if (error) {
				lwkt_reltoken(&map->token);
				return(error);
			}

			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  mmapable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&map->token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&map->token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 *
	 * (object can be NULL)
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (uksmap) {
		rv = vm_map_find(map, uksmap, vp->v_rdev,
				 foff, addr, size,
				 align,
				 fitit, VM_MAPTYPE_UKSMAP,
				 prot, maxprot, docow);
	} else if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align,
				 fitit, VM_MAPTYPE_VPAGETABLE,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align,
				 fitit, VM_MAPTYPE_NORMAL,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference. Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 *
		 * (NOTE: object can be NULL)
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/* If a process has marked all future mappings for wiring, do so */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_unwire(map, *addr, *addr + size, FALSE);

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&map->token);
	
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
Example #20
static int
link_elf_load_file(const char* filename, linker_file_t* result)
{
    struct nlookupdata nd;
    struct thread *td = curthread;	/* XXX */
    struct proc *p = td->td_proc;
    struct vnode *vp;
    Elf_Ehdr *hdr;
    caddr_t firstpage;
    int nbytes, i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr *segs[2];
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phphdr;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Off base_offset;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    int error = 0;
    int resid;
    elf_file_t ef;
    linker_file_t lf;
    char *pathname;
    Elf_Shdr *shdr;
    int symtabindex;
    int symstrindex;
    int symcnt;
    int strcnt;

    /* XXX Hack for firmware loading where p == NULL */
    if (p == NULL) {
	p = &proc0;
    }

    KKASSERT(p != NULL);
    if (p->p_ucred == NULL) {
	kprintf("link_elf_load_file: cannot load '%s' from filesystem"
		" this early\n", filename);
	return ENOENT;
    }
    shdr = NULL;
    lf = NULL;
    pathname = linker_search_path(filename);
    if (pathname == NULL)
	return ENOENT;

    error = nlookup_init(&nd, pathname, UIO_SYSSPACE, NLC_FOLLOW|NLC_LOCKVP);
    if (error == 0)
	error = vn_open(&nd, NULL, FREAD, 0);
    kfree(pathname, M_LINKER);
    if (error) {
	nlookup_done(&nd);
	return error;
    }
    vp = nd.nl_open_vp;
    nd.nl_open_vp = NULL;
    nlookup_done(&nd);

    /*
     * Read the elf header from the file.
     */
    firstpage = kmalloc(PAGE_SIZE, M_LINKER, M_WAITOK);
    hdr = (Elf_Ehdr *)firstpage;
    error = vn_rdwr(UIO_READ, vp, firstpage, PAGE_SIZE, 0,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    nbytes = PAGE_SIZE - resid;
    if (error)
	goto out;

    if (!IS_ELF(*hdr)) {
	error = ENOEXEC;
	goto out;
    }

    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS
      || hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
	link_elf_error("Unsupported file layout");
	error = ENOEXEC;
	goto out;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT
      || hdr->e_version != EV_CURRENT) {
	link_elf_error("Unsupported file version");
	error = ENOEXEC;
	goto out;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
	error = ENOSYS;
	goto out;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
	link_elf_error("Unsupported machine");
	error = ENOEXEC;
	goto out;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And it simplifies things considerably.
     */
    if (!((hdr->e_phentsize == sizeof(Elf_Phdr)) &&
	  (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= PAGE_SIZE) &&
	  (hdr->e_phoff + hdr->e_phnum*sizeof(Elf_Phdr) <= nbytes)))
	link_elf_error("Unreadable program headers");

    /*
     * Scan the program header entries, and save key information.
     *
     * We rely on there being exactly two load segments, text and data,
     * in that order.
     */
    phdr = (Elf_Phdr *) (firstpage + hdr->e_phoff);
    phlimit = phdr + hdr->e_phnum;
    nsegs = 0;
    phdyn = NULL;
    phphdr = NULL;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_LOAD:
	    if (nsegs == 2) {
		link_elf_error("Too many sections");
		error = ENOEXEC;
		goto out;
	    }
	    segs[nsegs] = phdr;
	    ++nsegs;
	    break;

	case PT_PHDR:
	    phphdr = phdr;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_INTERP:
	    error = ENOSYS;
	    goto out;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	link_elf_error("Object is not dynamically-linked");
	error = ENOEXEC;
	goto out;
    }

    /*
     * Allocate the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_offset = trunc_page(segs[0]->p_offset);
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_memsz);
    mapsize = base_vlimit - base_vaddr;

    ef = kmalloc(sizeof(struct elf_file), M_LINKER, M_WAITOK | M_ZERO);
#ifdef SPARSE_MAPPING
    ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT);
    if (ef->object == NULL) {
	kfree(ef, M_LINKER);
	error = ENOMEM;
	goto out;
    }
    vm_object_hold(ef->object);
    vm_object_reference_locked(ef->object);
    ef->address = (caddr_t)vm_map_min(&kernel_map);
    error = vm_map_find(&kernel_map, ef->object, 0,
			(vm_offset_t *)&ef->address,
			mapsize, PAGE_SIZE,
			1, VM_MAPTYPE_NORMAL,
			VM_PROT_ALL, VM_PROT_ALL,
			0);
    vm_object_drop(ef->object);
    if (error) {
	vm_object_deallocate(ef->object);
	kfree(ef, M_LINKER);
	goto out;
    }
#else
    ef->address = kmalloc(mapsize, M_LINKER, M_WAITOK);
#endif
    mapbase = ef->address;

    /*
     * Read the text and data sections and zero the bss.
     */
    for (i = 0; i < 2; i++) {
	caddr_t segbase = mapbase + segs[i]->p_vaddr - base_vaddr;
	error = vn_rdwr(UIO_READ, vp,
			segbase, segs[i]->p_filesz, segs[i]->p_offset,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
	if (error) {
#ifdef SPARSE_MAPPING
	    vm_map_remove(&kernel_map, (vm_offset_t) ef->address,
			  (vm_offset_t) ef->address
			  + (ef->object->size << PAGE_SHIFT));
	    vm_object_deallocate(ef->object);
#else
	    kfree(ef->address, M_LINKER);
#endif
	    kfree(ef, M_LINKER);
	    goto out;
	}
	bzero(segbase + segs[i]->p_filesz,
	      segs[i]->p_memsz - segs[i]->p_filesz);

#ifdef SPARSE_MAPPING
	/*
	 * Wire down the pages
	 */
	vm_map_wire(&kernel_map,
		    (vm_offset_t) segbase,
		    (vm_offset_t) segbase + segs[i]->p_memsz,
		    0);
#endif
    }

    ef->dynamic = (const Elf_Dyn *) (mapbase + phdyn->p_vaddr - base_vaddr);

    lf = linker_make_file(filename, ef, &link_elf_file_ops);
    if (lf == NULL) {
#ifdef SPARSE_MAPPING
	vm_map_remove(&kernel_map, (vm_offset_t) ef->address,
		      (vm_offset_t) ef->address
		      + (ef->object->size << PAGE_SHIFT));
	vm_object_deallocate(ef->object);
#else
	kfree(ef->address, M_LINKER);
#endif
	kfree(ef, M_LINKER);
	error = ENOMEM;
	goto out;
    }
    lf->address = ef->address;
    lf->size = mapsize;

    error = parse_dynamic(lf);
    if (error)
	goto out;
    link_elf_reloc_local(lf);
    error = linker_load_dependencies(lf);
    if (error)
	goto out;
    error = relocate_file(lf);
    if (error)
	goto out;

    /* Try and load the symbol table if it's present.  (you can strip it!) */
    nbytes = hdr->e_shnum * hdr->e_shentsize;
    if (nbytes == 0 || hdr->e_shoff == 0)
	goto nosyms;
    shdr = kmalloc(nbytes, M_LINKER, M_WAITOK | M_ZERO);
    error = vn_rdwr(UIO_READ, vp,
		    (caddr_t)shdr, nbytes, hdr->e_shoff,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;
    symtabindex = -1;
    symstrindex = -1;
    for (i = 0; i < hdr->e_shnum; i++) {
	if (shdr[i].sh_type == SHT_SYMTAB) {
	    symtabindex = i;
	    symstrindex = shdr[i].sh_link;
	}
    }
    if (symtabindex < 0 || symstrindex < 0)
	goto nosyms;

    symcnt = shdr[symtabindex].sh_size;
    ef->symbase = kmalloc(symcnt, M_LINKER, M_WAITOK);
    strcnt = shdr[symstrindex].sh_size;
    ef->strbase = kmalloc(strcnt, M_LINKER, M_WAITOK);
    error = vn_rdwr(UIO_READ, vp,
		    ef->symbase, symcnt, shdr[symtabindex].sh_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;
    error = vn_rdwr(UIO_READ, vp,
		    ef->strbase, strcnt, shdr[symstrindex].sh_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid);
    if (error)
	goto out;

    ef->ddbsymcnt = symcnt / sizeof(Elf_Sym);
    ef->ddbsymtab = (const Elf_Sym *)ef->symbase;
    ef->ddbstrcnt = strcnt;
    ef->ddbstrtab = ef->strbase;

nosyms:

    *result = lf;

out:
    if (error && lf)
	linker_file_unload(lf);
    if (shdr)
	kfree(shdr, M_LINKER);
    if (firstpage)
	kfree(firstpage, M_LINKER);
    vn_unlock(vp);
    vn_close(vp, FREAD);

    return error;
}
Example #21
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (    (vm_offset_t)pvFixed      < vm_map_min(pMap)
            ||  (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    vm_offset_t MapAddress = pvFixed != (void *)-1
                           ? (vm_offset_t)pvFixed
                           : vm_map_min(pMap);
    if (pvFixed != (void *)-1)
        vm_map_remove(pMap,
                      MapAddress,
                      MapAddress + cb);

    rc = vm_map_find(pMap,                          /* map */
                     NULL,                          /* object */
                     0,                             /* offset */
                     &MapAddress,                   /* addr (IN/OUT) */
                     cb,                            /* length */
#if __FreeBSD_version >= 1000055
                     0,                             /* max addr */
#endif
                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                    /* find_space */
                     VM_PROT_NONE,                  /* protection */
                     VM_PROT_ALL,                   /* max(_prot) ?? */
                     0);                            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        if (R0Process != NIL_RTR0PROCESS)
        {
            rc = vm_map_inherit(pMap,
                                MapAddress,
                                MapAddress + cb,
                                VM_INHERIT_SHARE);
            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
        }
        pMemFreeBSD->Core.pv = (void *)MapAddress;
        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;

}
Example #22
boolean_t gzalloc_free(zone_t zone, void *addr) {
	boolean_t gzfreed = FALSE;
	kern_return_t kr;

	if (__improbable(gzalloc_mode &&
		(((zone->elem_size >= gzalloc_min) &&
		    (zone->elem_size <= gzalloc_max))) &&
		(zone->gzalloc_exempt == 0))) {
		gzhdr_t *gzh;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t saddr;
		vm_offset_t free_addr = 0;

		if (gzalloc_uf_mode) {
			gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size);
			saddr = (vm_offset_t) addr - PAGE_SIZE;
		} else {
			gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
			saddr = ((vm_offset_t)addr) - residue;
		}

		assert((saddr & PAGE_MASK) == 0);

		if (gzalloc_consistency_checks) {
			if (gzh->gzsig != GZALLOC_SIGNATURE) {
				panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig);
			}

			if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE))
				panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr);
			/* Partially redundant given the zone check, but may flag header corruption */
			if (gzh->gzsize != zone->elem_size) {
				panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr);
			}
		}

		if (!kmem_ready || gzh->gzone == GZDEADZONE) {
			/* For now, just leak frees of early allocations
			 * performed before kmem is fully configured.
			 * They don't seem to get freed currently;
			 * consider ml_static_mfree in the future.
			 */
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
			return TRUE;
		}

		if (get_preemption_level() != 0) {
			pdzfree_count++;
		}

		if (gzfc_size) {
			/* Either write protect or unmap the newly freed
			 * allocation
			 */
			kr = vm_map_protect(
				gzalloc_map,
				saddr,
				saddr + rounded_size + (1 * PAGE_SIZE),
				gzalloc_prot,
				FALSE);
			if (kr != KERN_SUCCESS)
				panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr);
		} else {
			free_addr = saddr;
		}

		lock_zone(zone);

		/* Insert newly freed element into the protected free element
		 * cache, and rotate out the LRU element.
		 */
		if (gzfc_size) {
			if (zone->gz.gzfc_index >= gzfc_size) {
				zone->gz.gzfc_index = 0;
			}
			free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
			zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
		}

		if (free_addr) {
			zone->count--;
			zone->cur_size -= rounded_size;
		}

		unlock_zone(zone);

		if (free_addr) {
			kr = vm_map_remove(
				gzalloc_map,
				free_addr,
				free_addr + rounded_size + (1 * PAGE_SIZE),
				VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS)
				panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);

			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);
		}

		gzfreed = TRUE;
	}
	return gzfreed;
}
Example #23
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
		  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = 0;
	size_t copy_len;

	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file,
		 * which means it has zero-filled areas, and
		 * we have to work for it.  Stupid iBCS!
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we
		 * don't care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d):  vm_mmap(&vmspace->vm_map, &0x%08lx, 0x%x, 0x%x, "
		"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
		__FILE__, __LINE__, map_addr, map_len, prot, map_offset));

	if ((error = vm_mmap(&vmspace->vm_map,
			     &map_addr,
			     map_len,
			     prot,
			     VM_PROT_ALL,
			     MAP_PRIVATE | MAP_FIXED,
			     OBJT_VNODE,
			     vp,
			     map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accommodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actually map in the file last.
	 */

	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08lx,0x%x, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, map_addr, map_len));

	if (map_len != 0) {
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
				    map_len, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return error;
	}

	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &data_buf,
			    PAGE_SIZE,
			    VM_PROT_READ,
			    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	if (vm_map_remove(kernel_map,
			  (vm_offset_t) data_buf,
			  (vm_offset_t) data_buf + PAGE_SIZE))
		panic("load_coff_section vm_map_remove failed");

	return error;
}
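Since the subtle part of the zero-fill handling is the page arithmetic rather than the mapping calls themselves, here is a small stand-alone illustration (made-up numbers, not kernel code) of how copy_len and the size of the anonymous zero-fill area are derived when memsz > filsz.

#include <stdio.h>

#define PAGE_SZ 4096ul

static unsigned long trunc_pg(unsigned long x) { return x & ~(PAGE_SZ - 1); }
static unsigned long round_pg(unsigned long x) { return (x + PAGE_SZ - 1) & ~(PAGE_SZ - 1); }

int
main(void)
{
	/* Illustrative section parameters. */
	unsigned long offset = 0x2000, filsz = 0x1234, memsz = 0x5000;
	unsigned long vmaddr = 0x40000000;

	/* Bytes of real file data spilling past the last full page. */
	unsigned long copy_len = (offset + filsz) - trunc_pg(offset + filsz);
	/* Anonymous area covering the zero-filled tail of the section. */
	unsigned long map_addr = trunc_pg(vmaddr + filsz);
	unsigned long map_len  = round_pg(vmaddr + memsz) - map_addr;

	printf("copy %lu tail bytes into a %lu-byte anonymous area at %#lx\n",
	    copy_len, map_len, map_addr);
	return 0;
}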
Exemple #24
/*
 * Destroy old address space, and allocate a new stack.
 *	The new stack is only sgrowsiz large because it is grown
 *	automatically on a page fault.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	vm_object_t obj;
	struct rlimit rlim_stack;
	vm_offset_t sv_minuser, stack_addr;
	vm_map_t map;
	u_long ssiz;

	imgp->vmspace_destroyed = 1;
	imgp->sysent = sv;

	/* May be called with Giant held */
	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE); ASLR state
		 * must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	/* Map a shared page */
	obj = sv->sv_shared_page_obj;
	if (obj != NULL) {
		vm_object_reference(obj);
		error = vm_map_fixed(map, obj, 0,
		    sv->sv_shared_page_base, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(obj);
			return (vm_mmap_to_errno(error));
		}
	}

	/* Allocate a new stack */
	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}
	stack_addr = sv->sv_usrstack - ssiz;
	error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz,
	    obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
	    sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS)
		return (vm_mmap_to_errno(error));

	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_maxsaddr = (char *)stack_addr;

	return (0);
}
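The stack-sizing policy near the end is easy to misread, so here is a compact sketch of just that decision (placeholder types and values, not the FreeBSD implementation): the image-requested size is page-truncated, clamped to the hard limit, and the soft limit is raised to match; otherwise the ABI or global maximum is used.

#include <stdio.h>

#define PAGE_SZ 4096ul

static unsigned long
trunc_pg(unsigned long x)
{
	return x & ~(PAGE_SZ - 1);
}

static unsigned long
pick_stack_size(unsigned long requested, unsigned long *rlim_cur,
    unsigned long rlim_max, unsigned long default_max)
{
	unsigned long ssiz;

	if (requested != 0) {
		ssiz = trunc_pg(requested);
		if (ssiz > rlim_max)
			ssiz = rlim_max;
		if (ssiz > *rlim_cur)
			*rlim_cur = ssiz;	/* kern_setrlimit() in the real code */
	} else {
		ssiz = default_max;		/* *sv->sv_maxssiz or maxssiz in the real code */
	}
	return ssiz;
}

int
main(void)
{
	unsigned long cur = 8ul << 20, max = 64ul << 20;
	unsigned long ssiz = pick_stack_size(32ul << 20, &cur, max, 512ul << 20);

	printf("stack size %lu, soft limit now %lu\n", ssiz, cur);
	return 0;
}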
Exemple #25
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
//  AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;

    /* calc protection */
    vm_prot_t       ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    vm_offset_t  Addr = vm_map_min(kernel_map);
    if (cbSub == 0)
        cbSub = pMemToMap->cb - offSub;

    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(kernel_map,            /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     offSub,                /* Start offset in the object */
                     &Addr,                 /* Start address IN/OUT */
                     cbSub,                 /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of mapping */
#endif
                     VMFS_ANY_SPACE,        /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)Addr,
                                                                           cbSub);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
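The protection-flag translation at the top of this function is independent of the VM calls and can be shown in isolation. The sketch below is a stand-alone illustration using invented SK_* placeholder constants rather than the real RTMEM_PROT_*/VM_PROT_* headers; it mirrors the same bit-by-bit mapping that is performed before the result is handed to vm_map_find().

#include <stdint.h>
#include <stdio.h>

/* Placeholder flag values for this sketch only. */
#define SK_PROT_READ    0x1u
#define SK_PROT_WRITE   0x2u
#define SK_PROT_EXEC    0x4u

#define SK_VM_NONE      0x0u
#define SK_VM_READ      0x1u
#define SK_VM_WRITE     0x2u
#define SK_VM_EXECUTE   0x4u

static uint32_t
prot_to_vmprot(uint32_t fProt)
{
	uint32_t vmprot = SK_VM_NONE;

	if (fProt & SK_PROT_READ)
		vmprot |= SK_VM_READ;
	if (fProt & SK_PROT_WRITE)
		vmprot |= SK_VM_WRITE;
	if (fProt & SK_PROT_EXEC)
		vmprot |= SK_VM_EXECUTE;
	return vmprot;
}

int
main(void)
{
	printf("R+W -> %#x, R+X -> %#x\n",
	    prot_to_vmprot(SK_PROT_READ | SK_PROT_WRITE),
	    prot_to_vmprot(SK_PROT_READ | SK_PROT_EXEC));
	return 0;
}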
Exemple #26
void
vmm_mem_free(struct vmspace *vmspace, vm_paddr_t gpa, size_t len)
{

	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
}
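A caller only has to supply the guest-physical range it originally mapped. The snippet below is a hypothetical teardown loop, not bhyve code: struct mem_seg_sketch is invented for illustration, and the kernel types (vm_paddr_t, struct vmspace) are assumed to come from the same environment as the example above.

/* Hypothetical bookkeeping entry for one guest memory segment. */
struct mem_seg_sketch {
	vm_paddr_t	gpa;	/* guest-physical base of the segment */
	size_t		len;	/* length in bytes, 0 if the slot is unused */
};

static void
free_all_segments_sketch(struct vmspace *vmspace, struct mem_seg_sketch *segs,
    int nsegs)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].len != 0)
			vmm_mem_free(vmspace, segs[i].gpa, segs[i].len);
		segs[i].len = 0;
	}
}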