Example #1
File: test.c  Project: kdmurray91/libkdm
void
test_km_free(void *ptr)
{
    char *dat = strdup("test");
    /* Test freeing buffer */
    tt_ptr_op(dat, !=, NULL);
    km_free(dat);
    tt_ptr_op(dat, ==, NULL);
    /* This free(NULL) should not fail */
    km_free(dat);
    tt_ptr_op(dat, ==, NULL);
end:
    ;
}
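Note that these assertions only hold if km_free is a macro that both frees the buffer and nulls the caller's pointer; an ordinary function receiving the pointer by value could not set dat to NULL. A minimal sketch of such a wrapper (hypothetical, not the actual libkdm implementation):

#include <stdlib.h>

/* Hypothetical pointer-nulling free wrapper; the real libkdm macro may
 * differ. free(NULL) is a no-op per the C standard, so invoking this
 * twice on the same pointer is safe, as the test above relies on. */
#define km_free(ptr)     \
    do {                 \
        free(ptr);       \
        (ptr) = NULL;    \
    } while (0)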
Example #2
File: dist.c  Project: kdmurray91/tableTK
void
destroy_distmat_t(dist_mat_t *dm)
{
    if (dm != NULL) {
        km_free(dm->matrix);
        if (dm->sample_names) {
            size_t iii;
            for (iii = 0; iii < dm->samples; iii++) {
                km_free(dm->sample_names[iii]);
            }
            km_free(dm->sample_names);
        }
        free(dm);
    }
}
Example #3
void codepatch_unmaprw(vaddr_t nva)
{
	if (nva == 0)
		return;
	pmap_kremove(nva, 2 * PAGE_SIZE);
	km_free((void *)nva, 2 * PAGE_SIZE, &kv_any, &kp_none);
}
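The kernel examples here use the OpenBSD allocator, where km_free(9) takes the mapping's size plus the same kmem_va_mode and kmem_pa_mode constants that were given to km_alloc(9). A minimal sketch of a matched pair, assuming kernel context:

	/* Sketch of a matched km_alloc/km_free pair (OpenBSD kernel).
	 * The size and the kv/kp mode arguments should agree between
	 * allocation and free. */
	void *buf = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_waitok);
	if (buf != NULL)
		km_free(buf, PAGE_SIZE, &kv_any, &kp_zero);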
Example #4
void
mpbios_unmap(struct mp_map *handle)
{
    pmap_kremove(handle->baseva, handle->vsize);
    pmap_update(pmap_kernel());
    km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
}
Example #5
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	if (IS_XKPHYS((vaddr_t)kva))
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}
Example #6
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	km_free(kva, round_page(size), &kv_any, &kp_none);
}
Example #7
void
i80321_mem_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	va = trunc_page((vaddr_t)bsh);
	endva = round_page(va + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	km_free((void *)va, endva - va, &kv_any, &kp_none);
}
Example #8
void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--nbigpipe;
		amountpipekva -= cpipe->pipe_buffer.size;
		km_free(cpipe->pipe_buffer.buffer, cpipe->pipe_buffer.size,
		    &kv_any, &kp_pageable);
		cpipe->pipe_buffer.buffer = NULL;
	}
}
Example #9
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif
	if (kva >= (caddr_t)PMAP_DIRECT_BASE && kva <= (caddr_t)PMAP_DIRECT_END)
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}
Example #10
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, ret;
	const struct kmem_dyn_mode *kd;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
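	/*
	 * Remember the original base and length in sva/ssize: va and
	 * size are consumed by the loop below, and the error path must
	 * unwind the entire allocation with km_free.
	 */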
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			/*
			 * we don't want pmap to panic here if it can't
			 * alloc
			 */
			ret = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (ret) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (ret);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #11
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	km_free(kva, size, &kv_any, &kp_none);
}
Example #12
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;

	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #13
void
i80321_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	va = trunc_page((vaddr_t)bsh);
	endva = round_page(bsh + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	km_free((void *)va, endva - va, &kv_any, &kp_none);
}
Example #14
/*
 * Release a pager map segment.
 *
 * Caller does not lock.
 *
 * Deallocates pseg if it is no longer in use.
 */
void
uvm_pseg_release(vaddr_t segaddr)
{
	int id;
	struct uvm_pseg *pseg;
	vaddr_t va = 0;

	for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
		if (pseg->start <= segaddr &&
		    segaddr < pseg->start + MAX_PAGER_SEGS * MAXBSIZE)
			break;
	}
	KASSERT(pseg != &psegs[PSEG_NUMSEGS]);

	id = (segaddr - pseg->start) / MAXBSIZE;
	KASSERT(id >= 0 && id < MAX_PAGER_SEGS);

	/* test for no remainder */
	KDASSERT(segaddr == pseg->start + id * MAXBSIZE);

	mtx_enter(&uvm_pseg_lck);

	KASSERT(UVM_PSEG_INUSE(pseg, id));

	pseg->use &= ~(1 << id);
	wakeup(&psegs);

	if (pseg != &psegs[0] && UVM_PSEG_EMPTY(pseg)) {
		va = pseg->start;
		pseg->start = 0;
	}

	mtx_leave(&uvm_pseg_lck);

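	/*
	 * Do the km_free outside the mutex: releasing a kv_any kernel
	 * va range may sleep, which is not allowed while holding
	 * uvm_pseg_lck.
	 */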
	if (va)
		km_free((void *)va, MAX_PAGER_SEGS * MAXBSIZE,
		    &kv_any, &kp_none);
}
Example #15
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

	DPRINTF(("bus_dmamem_unmap: t = %p, kva = %p, size = %zu\n", t, kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P[12]SEG.
	 */
	if ((kva >= (caddr_t)SH3_P1SEG_BASE)
	 && (kva <= (caddr_t)SH3_P2SEG_END)) {
		return;
	}

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	km_free(kva, size, &kv_any, &kp_none);
}
Example #16
File: dist.c  Project: kdmurray91/tableTK
int
process_header (table_t *tab, char *line)
{
    size_t col = 0;
    size_t sample = 0;
    size_t n_alloced = 1<<4;
    char **samples = km_calloc(n_alloced, sizeof(*samples),
            &km_onerr_print_exit);
    char *tok_line = strdup(line);
    char *np = NULL;
    char *tok = strtok_r(tok_line, tab->sep, &np);
    do {
        char *nl = NULL;
        char *tok_cpy = NULL;
        if (col++ < tab->skipcol) {
            tok = strtok_r(NULL, tab->sep, &np);
            continue;
        }
        if (sample + 1 >= n_alloced) {
            size_t newsz = kmroundupz(n_alloced);
            n_alloced = newsz;
            samples = km_realloc(samples, n_alloced * sizeof(*samples),
                    &km_onerr_print_exit);
        }
        tok_cpy = strdup(tok);
        nl = strchr(tok_cpy, '\n');
        if (nl != NULL) {
            nl[0] = '\0';
        }
        samples[sample++] = tok_cpy;
        tok = strtok_r(NULL, tab->sep, &np);
    } while (tok != NULL);
    km_free(tok_line);
    ((dist_mat_t *)(tab->data))->sample_names = samples;
    return 1;
}
Example #17
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

 freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
 clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}
Example #18
int
sti_rom_setup(struct sti_rom *rom, bus_space_tag_t iot, bus_space_tag_t memt,
    bus_space_handle_t romh, bus_addr_t *bases, u_int codebase)
{
	struct sti_dd *dd;
	int error, size, i;

	STI_ENABLE_ROM(rom->rom_softc);

	rom->iot = iot;
	rom->memt = memt;
	rom->romh = romh;
	rom->bases = bases;

	/*
	 * Get ROM header and code function pointers.
	 */

	dd = &rom->rom_dd;
	rom->rom_devtype = bus_space_read_1(memt, romh, 3);
	if (rom->rom_devtype == STI_DEVTYPE1) {
		dd->dd_type      = bus_space_read_1(memt, romh, 0x03);
		dd->dd_nmon      = bus_space_read_1(memt, romh, 0x07);
		dd->dd_grrev     = bus_space_read_1(memt, romh, 0x0b);
		dd->dd_lrrev     = bus_space_read_1(memt, romh, 0x0f);
		dd->dd_grid[0]   = parseword(0x10);
		dd->dd_grid[1]   = parseword(0x20);
		dd->dd_fntaddr   = parseword(0x30) & ~3;
		dd->dd_maxst     = parseword(0x40);
		dd->dd_romend    = parseword(0x50) & ~3;
		dd->dd_reglst    = parseword(0x60) & ~3;
		dd->dd_maxreent  = parseshort(0x70);
		dd->dd_maxtimo   = parseshort(0x78);
		dd->dd_montbl    = parseword(0x80) & ~3;
		dd->dd_udaddr    = parseword(0x90) & ~3;
		dd->dd_stimemreq = parseword(0xa0);
		dd->dd_udsize    = parseword(0xb0);
		dd->dd_pwruse    = parseshort(0xc0);
		dd->dd_bussup    = bus_space_read_1(memt, romh, 0xcb);
		dd->dd_ebussup   = bus_space_read_1(memt, romh, 0xcf);
		dd->dd_altcodet  = bus_space_read_1(memt, romh, 0xd3);
		dd->dd_eddst[0]  = bus_space_read_1(memt, romh, 0xd7);
		dd->dd_eddst[1]  = bus_space_read_1(memt, romh, 0xdb);
		dd->dd_eddst[2]  = bus_space_read_1(memt, romh, 0xdf);
		dd->dd_cfbaddr   = parseword(0xe0) & ~3;

		codebase <<= 2;
		dd->dd_pacode[0x0] = parseword(codebase + 0x000) & ~3;
		dd->dd_pacode[0x1] = parseword(codebase + 0x010) & ~3;
		dd->dd_pacode[0x2] = parseword(codebase + 0x020) & ~3;
		dd->dd_pacode[0x3] = parseword(codebase + 0x030) & ~3;
		dd->dd_pacode[0x4] = parseword(codebase + 0x040) & ~3;
		dd->dd_pacode[0x5] = parseword(codebase + 0x050) & ~3;
		dd->dd_pacode[0x6] = parseword(codebase + 0x060) & ~3;
		dd->dd_pacode[0x7] = parseword(codebase + 0x070) & ~3;
		dd->dd_pacode[0x8] = parseword(codebase + 0x080) & ~3;
		dd->dd_pacode[0x9] = parseword(codebase + 0x090) & ~3;
		dd->dd_pacode[0xa] = parseword(codebase + 0x0a0) & ~3;
		dd->dd_pacode[0xb] = parseword(codebase + 0x0b0) & ~3;
		dd->dd_pacode[0xc] = parseword(codebase + 0x0c0) & ~3;
		dd->dd_pacode[0xd] = parseword(codebase + 0x0d0) & ~3;
		dd->dd_pacode[0xe] = parseword(codebase + 0x0e0) & ~3;
		dd->dd_pacode[0xf] = parseword(codebase + 0x0f0) & ~3;
	} else {	/* STI_DEVTYPE4 */
		bus_space_read_raw_region_4(memt, romh, 0, (u_int8_t *)dd,
		    sizeof(*dd));
		/* fix pacode... */
		bus_space_read_raw_region_4(memt, romh, codebase,
		    (u_int8_t *)dd->dd_pacode, sizeof(dd->dd_pacode));
	}

	STI_DISABLE_ROM(rom->rom_softc);

#ifdef STIDEBUG
	printf("dd:\n"
	    "devtype=%x, rev=%x;%d, altt=%x, gid=%08x%08x, font=%x, mss=%x\n"
	    "end=%x, regions=%x, msto=%x, timo=%d, mont=%x, user=%x[%x]\n"
	    "memrq=%x, pwr=%d, bus=%x, ebus=%x, cfb=%x\n"
	    "code=",
	    dd->dd_type & 0xff, dd->dd_grrev, dd->dd_lrrev, dd->dd_altcodet,
	    dd->dd_grid[0], dd->dd_grid[1], dd->dd_fntaddr, dd->dd_maxst,
	    dd->dd_romend, dd->dd_reglst, dd->dd_maxreent, dd->dd_maxtimo,
	    dd->dd_montbl, dd->dd_udaddr, dd->dd_udsize, dd->dd_stimemreq,
	    dd->dd_pwruse, dd->dd_bussup, dd->dd_ebussup, dd->dd_cfbaddr);
	printf("%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x\n",
	    dd->dd_pacode[0x0], dd->dd_pacode[0x1], dd->dd_pacode[0x2],
	    dd->dd_pacode[0x3], dd->dd_pacode[0x4], dd->dd_pacode[0x5],
	    dd->dd_pacode[0x6], dd->dd_pacode[0x7], dd->dd_pacode[0x8],
	    dd->dd_pacode[0x9], dd->dd_pacode[0xa], dd->dd_pacode[0xb],
	    dd->dd_pacode[0xc], dd->dd_pacode[0xd], dd->dd_pacode[0xe],
	    dd->dd_pacode[0xf]);
#endif

	/*
	 * Figure out how many bytes we need for the STI code.
	 * Note there could be fewer than STI_END pointer entries
	 * populated, especially on older devices.
	 */

	for (i = STI_END; !dd->dd_pacode[i]; i--)
		;
	size = dd->dd_pacode[i] - dd->dd_pacode[STI_BEGIN];
	if (rom->rom_devtype == STI_DEVTYPE1)
		size = (size + 3) / 4;
	if (size == 0) {
		printf(": no code for the requested platform\n");
		return (EINVAL);
	}

	if ((rom->rom_code = (vaddr_t)km_alloc(round_page(size), &kv_any,
	    &kp_dirty, &kd_nowait)) == 0) {
		printf(": cannot allocate %u bytes for code\n", size);
		return (ENOMEM);
	}
#ifdef STIDEBUG
	printf("code=0x%x[%x]\n", rom->rom_code, size);
#endif

	/*
	 * Copy code into memory and make it executable.
	 */

	STI_ENABLE_ROM(rom->rom_softc);

	if (rom->rom_devtype == STI_DEVTYPE1) {
		u_int8_t *p = (u_int8_t *)rom->rom_code;
		u_int32_t addr, eaddr;

		for (addr = dd->dd_pacode[STI_BEGIN], eaddr = addr + size * 4;
		    addr < eaddr; addr += 4 )
			*p++ = bus_space_read_4(memt, romh, addr) & 0xff;

	} else	/* STI_DEVTYPE4 */
		bus_space_read_raw_region_4(memt, romh,
		    dd->dd_pacode[STI_BEGIN], (u_int8_t *)rom->rom_code,
		    size);

	STI_DISABLE_ROM(rom->rom_softc);

	if ((error = uvm_map_protect(kernel_map, rom->rom_code,
	    rom->rom_code + round_page(size), UVM_PROT_RX, FALSE))) {
		printf(": uvm_map_protect failed (%d)\n", error);
		km_free((void *)rom->rom_code, round_page(size), &kv_any,
		    &kp_dirty);
		return (error);
	}

	/*
	 * Setup code function pointers.
	 */

#define	O(i) \
	(dd->dd_pacode[(i)] == 0 ? 0 : \
	    (rom->rom_code + (dd->dd_pacode[(i)] - dd->dd_pacode[0]) / \
	      (rom->rom_devtype == STI_DEVTYPE1? 4 : 1)))

	rom->init	= (sti_init_t)	O(STI_INIT_GRAPH);
	rom->mgmt	= (sti_mgmt_t)	O(STI_STATE_MGMT);
	rom->unpmv	= (sti_unpmv_t)	O(STI_FONT_UNPMV);
	rom->blkmv	= (sti_blkmv_t)	O(STI_BLOCK_MOVE);
	rom->test	= (sti_test_t)	O(STI_SELF_TEST);
	rom->exhdl	= (sti_exhdl_t)	O(STI_EXCEP_HDLR);
	rom->inqconf	= (sti_inqconf_t)O(STI_INQ_CONF);
	rom->scment	= (sti_scment_t)O(STI_SCM_ENT);
	rom->dmac	= (sti_dmac_t)	O(STI_DMA_CTRL);
	rom->flowc	= (sti_flowc_t)	O(STI_FLOW_CTRL);
	rom->utiming	= (sti_utiming_t)O(STI_UTIMING);
	rom->pmgr	= (sti_pmgr_t)	O(STI_PROC_MGR);
	rom->util	= (sti_util_t)	O(STI_UTIL);

#undef	O

	/*
	 * Set colormap entry is not implemented until 8.04, so force
	 * a NULL pointer here.
	 */
	if (dd->dd_grrev < STI_REVISION(8,4)) {
		rom->scment = NULL;
	}

	return (0);
}
Example #19
int
i386_set_ldt(struct proc *p, void *args, register_t *retval)
{
    int error, i, n;
    struct pcb *pcb = &p->p_addr->u_pcb;
    pmap_t pmap = p->p_vmspace->vm_map.pmap;
    struct i386_set_ldt_args ua;
    union descriptor *descv;
    size_t old_len, new_len, ldt_len;
    union descriptor *old_ldt, *new_ldt;

    if (user_ldt_enable == 0)
        return (ENOSYS);

    if ((error = copyin(args, &ua, sizeof(ua))) != 0)
        return (error);

    if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
            ua.start + ua.num > 8192)
        return (EINVAL);

    descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
    if (descv == NULL)
        return (ENOMEM);

    if ((error = copyin(ua.desc, descv, sizeof (*descv) * ua.num)) != 0)
        goto out;

    /* Check descriptors for access violations. */
    for (i = 0; i < ua.num; i++) {
        union descriptor *desc = &descv[i];

        switch (desc->sd.sd_type) {
        case SDT_SYSNULL:
            desc->sd.sd_p = 0;
            break;
        case SDT_SYS286CGT:
        case SDT_SYS386CGT:
            /*
             * Only allow call gates targeting a segment
             * in the LDT or a user segment in the fixed
             * part of the gdt.  Segments in the LDT are
             * constrained (below) to be user segments.
             */
            if (desc->gd.gd_p != 0 &&
                    !ISLDT(desc->gd.gd_selector) &&
                    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
                     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
                      SEL_UPL))) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMEC:
        case SDT_MEMEAC:
        case SDT_MEMERC:
        case SDT_MEMERAC:
            /* Must be "present" if executable and conforming. */
            if (desc->sd.sd_p == 0) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMRO:
        case SDT_MEMROA:
        case SDT_MEMRW:
        case SDT_MEMRWA:
        case SDT_MEMROD:
        case SDT_MEMRODA:
        case SDT_MEMRWD:
        case SDT_MEMRWDA:
        case SDT_MEME:
        case SDT_MEMEA:
        case SDT_MEMER:
        case SDT_MEMERA:
            break;
        default:
            /*
             * Make sure that unknown descriptor types are
             * not marked present.
             */
            if (desc->sd.sd_p != 0) {
                error = EACCES;
                goto out;
            }
            break;
        }

        if (desc->sd.sd_p != 0) {
            /* Only user (ring-3) descriptors may be present. */
            if (desc->sd.sd_dpl != SEL_UPL) {
                error = EACCES;
                goto out;
            }
        }
    }

    /* allocate user ldt */
    simple_lock(&pmap->pm_lock);
    if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_len = pmap->pm_ldt_len;
        else
            ldt_len = 512;
        while ((ua.start + ua.num) > ldt_len)
            ldt_len *= 2;
        new_len = ldt_len * sizeof(union descriptor);

        simple_unlock(&pmap->pm_lock);
        new_ldt = km_alloc(round_page(new_len), &kv_any,
                           &kp_dirty, &kd_nowait);
        if (new_ldt == NULL) {
            error = ENOMEM;
            goto out;
        }
        simple_lock(&pmap->pm_lock);

        if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
            /*
             * Another thread (re)allocated the LDT to
             * sufficient size while we were blocked in
             * km_alloc. Oh well. The new entries
             * will quite probably not be right, but
             * hey.. not our problem if user applications
             * have race conditions like that.
             */
            km_free(new_ldt, round_page(new_len), &kv_any,
                    &kp_dirty);
            goto copy;
        }

        old_ldt = pmap->pm_ldt;

        if (old_ldt != NULL) {
            old_len = pmap->pm_ldt_len * sizeof(union descriptor);
        } else {
            old_len = NLDT * sizeof(union descriptor);
            old_ldt = ldt;
        }

        memcpy(new_ldt, old_ldt, old_len);
        memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);

        if (old_ldt != ldt)
            km_free(old_ldt, round_page(old_len),
                    &kv_any, &kp_dirty);

        pmap->pm_ldt = new_ldt;
        pmap->pm_ldt_len = ldt_len;

        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_free(pmap);
        else
            pmap->pm_flags |= PMF_USER_LDT;
        ldt_alloc(pmap, new_ldt, new_len);
        pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
        if (pcb == curpcb)
            lldt(pcb->pcb_ldt_sel);

    }
copy:
    /* Now actually replace the descriptors. */
    for (i = 0, n = ua.start; i < ua.num; i++, n++)
        pmap->pm_ldt[n] = descv[i];

    simple_unlock(&pmap->pm_lock);

    *retval = ua.start;

out:
    free(descv, M_TEMP);
    return (error);
}
Example #20
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;
	const struct kmem_dyn_mode *kd;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.  If there are no other mappings of
			 * that page, this amounts to a no-op.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PGF_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}