Example #1
void
kvm86_init()
{
	size_t vmdsize;
	char *buf;
	struct kvm86_data *vmd;
	struct pcb *pcb;
	paddr_t pa;
	int i;

	vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;

	if ((buf = km_alloc(vmdsize, &kv_any, &kp_zero, &kd_waitok)) == NULL)
		return;
	
	/* first page is stack */
	vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
	pcb = &vmd->pcb;

	/*
	 * derive pcb and TSS from proc0
	 * we want to access all IO ports, so we need a full-size
	 *  permission bitmap
	 * XXX do we really need the pcb or just the TSS?
	 */
	memcpy(pcb, &proc0.p_addr->u_pcb, sizeof(struct pcb));
	pcb->pcb_tss.tss_esp0 = (int)vmd;
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
		vmd->iomap[i] = 0;
	pcb->pcb_tss.tss_ioopt =
	    ((caddr_t)vmd->iomap - (caddr_t)&pcb->pcb_tss) << 16;

	/* setup TSS descriptor (including our iomap) */
	setsegment(&vmd->sd, &pcb->pcb_tss,
	    sizeof(struct pcb) + sizeof(vmd->iomap) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);

	/* prepare VM for BIOS calls */
	kvm86_mapbios(vmd);
	if ((bioscallscratchpage = km_alloc(PAGE_SIZE, &kv_any, &kp_dirty,
	    &kd_waitok)) == NULL)
		return;

	pmap_extract(pmap_kernel(), (vaddr_t)bioscallscratchpage, &pa);
	kvm86_map(vmd, pa, BIOSCALLSCRATCHPAGE_VMVA);
	bioscallvmd = vmd;
	bioscalltmpva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
	    &kd_waitok);
	mtx_init(&kvm86_mp_mutex, IPL_IPI);
}
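All of these examples go through km_alloc(9): km_alloc(size, va_mode, pa_mode, dyn_mode), paired with km_free(addr, size, va_mode, pa_mode) using the same size and modes. A minimal sketch of that pairing, assuming the OpenBSD kernel's <uvm/uvm_extern.h> declarations; the function name is hypothetical:

	/*
	 * Sketch only: allocate a zeroed, wired page range and release it.
	 * km_free() must be passed the same size and modes as km_alloc().
	 */
	void
	km_alloc_pair_sketch(void)
	{
		void *p;

		p = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_waitok);
		if (p == NULL)
			return;	/* can still fail, as kvm86_init() above shows */
		/* ... use the page ... */
		km_free(p, PAGE_SIZE, &kv_any, &kp_zero);
	}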
Example #2
int
acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
{
	paddr_t pgpa = trunc_page(pa);
	paddr_t endpa = round_page(pa + len);
	vaddr_t va = (vaddr_t)km_alloc(endpa - pgpa, &kv_any, &kp_none,
	    &kd_nowait);

	if (va == 0)
		return (ENOMEM);

	handle->baseva = va;
	handle->va = (u_int8_t *)(va + (pa & PGOFSET));
	handle->vsize = endpa - pgpa;
	handle->pa = pa;

	do {
		pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
		pgpa += NBPG;
	} while (pgpa < endpa);
	pmap_update(pmap_kernel());

	return 0;
}
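Tearing the mapping down walks back the handle fields recorded above; a sketch of the inverse (OpenBSD's acpi_unmap() is essentially this, but the name below is hypothetical):

	void
	acpi_unmap_sketch(struct acpi_mem_map *handle)
	{
		/* Remove the wired kernel mappings, then give back the VA. */
		pmap_kremove(handle->baseva, handle->vsize);
		pmap_update(pmap_kernel());
		km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
	}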
Example #3
/*
 * MD-specific resume preparation (creating resume time pagetables,
 * stacks, etc).
 */
void
hibernate_prepare_resume_machdep(union hibernate_info *hib_info)
{
	paddr_t pa, piglet_end;
	vaddr_t va;

	/*
	 * At this point, we are sure that the piglet's phys space is going to
	 * have been unused by the suspending kernel, but the vaddrs used by
	 * the suspending kernel may or may not be available to us here in the
	 * resuming kernel, so we allocate a new range of VAs for the piglet.
	 * Those VAs will be temporary and will cease to exist as soon as we
	 * switch to the resume PT, so we need to ensure that any VAs required
	 * during inflate are also entered into that map.
	 */

	hib_info->piglet_va = (vaddr_t)km_alloc(HIBERNATE_CHUNK_SIZE*3,
	    &kv_any, &kp_none, &kd_nowait);
	if (!hib_info->piglet_va)
		panic("Unable to allocate vaddr for hibernate resume piglet\n");

	piglet_end = hib_info->piglet_pa + HIBERNATE_CHUNK_SIZE*3;

	for (pa = hib_info->piglet_pa, va = hib_info->piglet_va;
	    pa < piglet_end; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_ALL);

	pmap_activate(curproc);
}
Example #4
/*
 * Initialize a uvm_pseg.
 *
 * May fail, in which case seg->start == 0.
 *
 * Caller locks uvm_pseg_lck.
 */
void
uvm_pseg_init(struct uvm_pseg *pseg)
{
	KASSERT(pseg->start == 0);
	KASSERT(pseg->use == 0);
	pseg->start = (vaddr_t)km_alloc(MAX_PAGER_SEGS * MAXBSIZE, &kv_any,
	    &kp_none, &kd_trylock);
}
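&kd_trylock asks km_alloc() to fail rather than sleep, so the call can leave pseg->start at 0, which is why the header comment requires the caller to hold uvm_pseg_lck and check the result. A hypothetical caller sketch, assuming the pager's own uvm_pseg_lck mutex:

	vaddr_t
	pseg_get_sketch(struct uvm_pseg *pseg)
	{
		mtx_enter(&uvm_pseg_lck);
		if (pseg->start == 0)
			uvm_pseg_init(pseg);
		if (pseg->start == 0) {
			/* km_alloc with &kd_trylock failed rather than slept. */
			mtx_leave(&uvm_pseg_lck);
			return (0);
		}
		mtx_leave(&uvm_pseg_lck);
		return (pseg->start);
	}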
Example #5
File: gdt.c Project: SylvestreG/bitrig
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	/* Can't sleep, in autoconf */
	ci->ci_gdt = km_alloc(round_page(GDT_SIZE + sizeof(*ci->ci_tss)),
	    &kv_any, &kp_zero, &kd_nowait);
	if (ci->ci_gdt == NULL)
		panic("gdt_init: can't alloc");
	ci->ci_tss = (struct x86_64_tss *)((int8_t *)ci->ci_gdt + GDT_SIZE);

	bcopy(gdtstore, ci->ci_gdt, GDT_SIZE);
}
Example #6
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg;
#ifdef DEBUG_DMA
	pt_entry_t *ptep;
#endif

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_cache(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    !(flags & BUS_DMA_COHERENT));

#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
Example #7
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const struct kmem_dyn_mode *kd;

	DPRINTF(("bus_dmamem_map: t = %p, segs = %p, nsegs = %d, size = %d, kvap = %p, flags = %x\n", t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT) {
			*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		} else {
			*kvap = (caddr_t)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);
		}
		DPRINTF(("bus_dmamem_map: addr = 0x%08lx, kva = %p\n", segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_map: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    PROT_READ | PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #8
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, ret;
	const struct kmem_dyn_mode *kd;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			/*
			 * we don't want pmap to panic here if it can't
			 * alloc
			 */
			ret = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (ret) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (ret);
			}

		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
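Example #8 pairs each failure path with a km_free() of the whole region, relying on PMAP_CANFAIL so pmap_enter() reports failure instead of panicking. The unmap side removes the mappings and returns the KVA; a sketch under those assumptions (hypothetical name; real ports differ, and a direct-map shortcut as in example #9 below would have to be checked for first):

	void
	bus_dmamem_unmap_sketch(bus_dma_tag_t t, caddr_t kva, size_t size)
	{
		vaddr_t sva = (vaddr_t)kva;

		size = round_page(size);
		pmap_remove(pmap_kernel(), sva, sva + size);
		pmap_update(pmap_kernel());
		km_free(kva, size, &kv_any, &kp_none);
	}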
Example #9
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;

	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #10
int
cpu_alloc_idle_pcb(struct cpu_info *ci)
{
	vaddr_t uaddr;
	struct pcb *pcb;
	struct trapframe *tf;

	/*
	 * Generate a kernel stack and PCB (in essence, a u-area) for the
	 * new CPU.
	 */
	uaddr = (vaddr_t)km_alloc(USPACE, &kv_any, &kp_zero, &kd_nowait);
	if (uaddr == 0) {
		printf("%s: unable to allocate idle stack\n",
		    __func__);
		return 1;
	}
	ci->ci_idle_pcb = pcb = (struct pcb *)uaddr;

	/*
	 * This code is largely derived from cpu_fork(), with which it
	 * should perhaps be shared.
	 */

	/* Copy the pcb */
	*pcb = proc0.p_addr->u_pcb;

	/* Set up the undefined stack for the process. */
	pcb->pcb_un.un_32.pcb32_und_sp = uaddr + USPACE_UNDEF_STACK_TOP;
	pcb->pcb_un.un_32.pcb32_sp = uaddr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the undefined stack with a known pattern */
	memset(((u_char *)uaddr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)uaddr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

	pcb->pcb_tf = tf =
	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *proc0.p_addr->u_pcb.pcb_tf;
	return 0;
}
Example #11
int
i80321_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, pagecnt;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x\n", bpa, size, flag);
#endif
	endpa = round_page(bpa + size);
	startpa = trunc_page(bpa);
	pagecnt = endpa - startpa;

	va = (vaddr_t)km_alloc(endpa - startpa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return(ENOMEM);
#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, startpa,
    endpa, endpa-startpa);
#endif

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pagecnt > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, pagecnt -= PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #12
/*
 * Create writeable aliases of memory we need
 * to write to as kernel is mapped read-only
 */
void *codepatch_maprw(vaddr_t *nva, vaddr_t dest)
{
	paddr_t kva = trunc_page((paddr_t)dest);
	paddr_t po = (paddr_t)dest & PAGE_MASK;
	paddr_t pa1, pa2;

	if (*nva == 0)
		*nva = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any, &kp_none,
					&kd_waitok);

	pmap_extract(pmap_kernel(), kva, &pa1);
	pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
	pmap_kenter_pa(*nva, pa1, PROT_READ | PROT_WRITE);
	pmap_kenter_pa(*nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());

	return (void *)(*nva + po);
}
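codepatch_maprw() leaves the two-page window allocated so *nva can be cached and reused across calls; the teardown only has to reverse the pmap_kenter_pa() calls. A sketch, with a hypothetical name:

	void
	codepatch_unmaprw_sketch(vaddr_t nva)
	{
		if (nva == 0)
			return;
		pmap_kremove(nva, 2 * PAGE_SIZE);
		pmap_update(pmap_kernel());
	}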
Example #13
int
cpu_alloc_arm_stack(struct cpu_info *ci)
{
	vaddr_t uaddr;

	/*
	 * Generate an irq and abort stack for the new CPU.
	 */
	uaddr = (vaddr_t)km_alloc(ARM_STACK_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (uaddr == 0) {
		printf("%s: unable to allocate arm stack\n",
		    __func__);
		return 1;
	}
	ci->ci_irqstack = uaddr + IRQ_STACK_TOP;
	ci->ci_abtstack = uaddr + ABT_STACK_TOP;

	return 0;
}
Example #14
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
int
pipespace(struct pipe *cpipe, u_int size)
{
	caddr_t buffer;

	buffer = km_alloc(size, &kv_any, &kp_pageable, &kd_waitok);
	if (buffer == NULL) {
		return (ENOMEM);
	}

	/* free old resources if we are resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;

	amountpipekva += cpipe->pipe_buffer.size;

	return (0);
}
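The pipe_free_kmem() referenced above is the other half of the accounting; a sketch consistent with the fields pipespace() touches, assuming the same struct pipe layout (the name below is hypothetical):

	void
	pipe_free_kmem_sketch(struct pipe *cpipe)
	{
		if (cpipe->pipe_buffer.buffer != NULL) {
			km_free(cpipe->pipe_buffer.buffer,
			    cpipe->pipe_buffer.size, &kv_any, &kp_pageable);
			amountpipekva -= cpipe->pipe_buffer.size;
			cpipe->pipe_buffer.buffer = NULL;
		}
	}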
Example #15
void
kmstartup(void)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
	int size;

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN(KERNBASE, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
	       p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	size = p->kcountsize + p->fromssize + p->tossize;
	cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
	if (cp == NULL) {
		printf("No memory for profiling.\n");
		return;
	}
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}
Example #16
/*
 * Map a chunk of memory read-only and return an appropriately
 * const'ed pointer.
 */
const void *
mpbios_map(paddr_t pa, int len, struct mp_map *handle)
{
    paddr_t pgpa = trunc_page(pa);
    paddr_t endpa = round_page(pa + len);
    vaddr_t va = (vaddr_t)km_alloc(endpa - pgpa, &kv_any, &kp_none,
                                   &kd_nowait);
    vaddr_t retva = va + (pa & PGOFSET);

    handle->pa = pa;
    handle->pg = pgpa;
    handle->psize = len;
    handle->baseva = va;
    handle->vsize = endpa - pgpa;

    do {
        pmap_kenter_pa(va, pgpa, VM_PROT_READ);
        va += PAGE_SIZE;
        pgpa += PAGE_SIZE;
    } while (pgpa < endpa);
    pmap_update(pmap_kernel());

    return ((const void *)retva);
}
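The matching unmap uses the handle fields recorded above; a sketch (OpenBSD's mpbios_unmap() is essentially this, name hypothetical):

	void
	mpbios_unmap_sketch(struct mp_map *handle)
	{
		pmap_kremove(handle->baseva, handle->vsize);
		pmap_update(pmap_kernel());
		km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
	}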
Example #17
int
sti_rom_setup(struct sti_rom *rom, bus_space_tag_t iot, bus_space_tag_t memt,
    bus_space_handle_t romh, bus_addr_t *bases, u_int codebase)
{
	struct sti_dd *dd;
	int error, size, i;

	STI_ENABLE_ROM(rom->rom_softc);

	rom->iot = iot;
	rom->memt = memt;
	rom->romh = romh;
	rom->bases = bases;

	/*
	 * Get ROM header and code function pointers.
	 */

	dd = &rom->rom_dd;
	rom->rom_devtype = bus_space_read_1(memt, romh, 3);
	if (rom->rom_devtype == STI_DEVTYPE1) {
		dd->dd_type      = bus_space_read_1(memt, romh, 0x03);
		dd->dd_nmon      = bus_space_read_1(memt, romh, 0x07);
		dd->dd_grrev     = bus_space_read_1(memt, romh, 0x0b);
		dd->dd_lrrev     = bus_space_read_1(memt, romh, 0x0f);
		dd->dd_grid[0]   = parseword(0x10);
		dd->dd_grid[1]   = parseword(0x20);
		dd->dd_fntaddr   = parseword(0x30) & ~3;
		dd->dd_maxst     = parseword(0x40);
		dd->dd_romend    = parseword(0x50) & ~3;
		dd->dd_reglst    = parseword(0x60) & ~3;
		dd->dd_maxreent  = parseshort(0x70);
		dd->dd_maxtimo   = parseshort(0x78);
		dd->dd_montbl    = parseword(0x80) & ~3;
		dd->dd_udaddr    = parseword(0x90) & ~3;
		dd->dd_stimemreq = parseword(0xa0);
		dd->dd_udsize    = parseword(0xb0);
		dd->dd_pwruse    = parseshort(0xc0);
		dd->dd_bussup    = bus_space_read_1(memt, romh, 0xcb);
		dd->dd_ebussup   = bus_space_read_1(memt, romh, 0xcf);
		dd->dd_altcodet  = bus_space_read_1(memt, romh, 0xd3);
		dd->dd_eddst[0]  = bus_space_read_1(memt, romh, 0xd7);
		dd->dd_eddst[1]  = bus_space_read_1(memt, romh, 0xdb);
		dd->dd_eddst[2]  = bus_space_read_1(memt, romh, 0xdf);
		dd->dd_cfbaddr   = parseword(0xe0) & ~3;

		codebase <<= 2;
		dd->dd_pacode[0x0] = parseword(codebase + 0x000) & ~3;
		dd->dd_pacode[0x1] = parseword(codebase + 0x010) & ~3;
		dd->dd_pacode[0x2] = parseword(codebase + 0x020) & ~3;
		dd->dd_pacode[0x3] = parseword(codebase + 0x030) & ~3;
		dd->dd_pacode[0x4] = parseword(codebase + 0x040) & ~3;
		dd->dd_pacode[0x5] = parseword(codebase + 0x050) & ~3;
		dd->dd_pacode[0x6] = parseword(codebase + 0x060) & ~3;
		dd->dd_pacode[0x7] = parseword(codebase + 0x070) & ~3;
		dd->dd_pacode[0x8] = parseword(codebase + 0x080) & ~3;
		dd->dd_pacode[0x9] = parseword(codebase + 0x090) & ~3;
		dd->dd_pacode[0xa] = parseword(codebase + 0x0a0) & ~3;
		dd->dd_pacode[0xb] = parseword(codebase + 0x0b0) & ~3;
		dd->dd_pacode[0xc] = parseword(codebase + 0x0c0) & ~3;
		dd->dd_pacode[0xd] = parseword(codebase + 0x0d0) & ~3;
		dd->dd_pacode[0xe] = parseword(codebase + 0x0e0) & ~3;
		dd->dd_pacode[0xf] = parseword(codebase + 0x0f0) & ~3;
	} else {	/* STI_DEVTYPE4 */
		bus_space_read_raw_region_4(memt, romh, 0, (u_int8_t *)dd,
		    sizeof(*dd));
		/* fix pacode... */
		bus_space_read_raw_region_4(memt, romh, codebase,
		    (u_int8_t *)dd->dd_pacode, sizeof(dd->dd_pacode));
	}

	STI_DISABLE_ROM(rom->rom_softc);

#ifdef STIDEBUG
	printf("dd:\n"
	    "devtype=%x, rev=%x;%d, altt=%x, gid=%08x%08x, font=%x, mss=%x\n"
	    "end=%x, regions=%x, msto=%x, timo=%d, mont=%x, user=%x[%x]\n"
	    "memrq=%x, pwr=%d, bus=%x, ebus=%x, cfb=%x\n"
	    "code=",
	    dd->dd_type & 0xff, dd->dd_grrev, dd->dd_lrrev, dd->dd_altcodet,
	    dd->dd_grid[0], dd->dd_grid[1], dd->dd_fntaddr, dd->dd_maxst,
	    dd->dd_romend, dd->dd_reglst, dd->dd_maxreent, dd->dd_maxtimo,
	    dd->dd_montbl, dd->dd_udaddr, dd->dd_udsize, dd->dd_stimemreq,
	    dd->dd_pwruse, dd->dd_bussup, dd->dd_ebussup, dd->dd_cfbaddr);
	printf("%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x\n",
	    dd->dd_pacode[0x0], dd->dd_pacode[0x1], dd->dd_pacode[0x2],
	    dd->dd_pacode[0x3], dd->dd_pacode[0x4], dd->dd_pacode[0x5],
	    dd->dd_pacode[0x6], dd->dd_pacode[0x7], dd->dd_pacode[0x8],
	    dd->dd_pacode[0x9], dd->dd_pacode[0xa], dd->dd_pacode[0xb],
	    dd->dd_pacode[0xc], dd->dd_pacode[0xd], dd->dd_pacode[0xe],
	    dd->dd_pacode[0xf]);
#endif

	/*
	 * Figure out how many bytes we need for the STI code.
	 * Note there could be fewer than STI_END pointer entries
	 * populated, especially on older devices.
	 */

	for (i = STI_END; !dd->dd_pacode[i]; i--)
		;
	size = dd->dd_pacode[i] - dd->dd_pacode[STI_BEGIN];
	if (rom->rom_devtype == STI_DEVTYPE1)
		size = (size + 3) / 4;
	if (size == 0) {
		printf(": no code for the requested platform\n");
		return (EINVAL);
	}

	if ((rom->rom_code = (vaddr_t)km_alloc(round_page(size), &kv_any,
	    &kp_dirty, &kd_nowait)) == 0) {
		printf(": cannot allocate %u bytes for code\n", size);
		return (ENOMEM);
	}
#ifdef STIDEBUG
	printf("code=0x%x[%x]\n", rom->rom_code, size);
#endif

	/*
	 * Copy code into memory and make it executable.
	 */

	STI_ENABLE_ROM(rom->rom_softc);

	if (rom->rom_devtype == STI_DEVTYPE1) {
		u_int8_t *p = (u_int8_t *)rom->rom_code;
		u_int32_t addr, eaddr;

		for (addr = dd->dd_pacode[STI_BEGIN], eaddr = addr + size * 4;
		    addr < eaddr; addr += 4 )
			*p++ = bus_space_read_4(memt, romh, addr) & 0xff;

	} else	/* STI_DEVTYPE4 */
		bus_space_read_raw_region_4(memt, romh,
		    dd->dd_pacode[STI_BEGIN], (u_int8_t *)rom->rom_code,
		    size);

	STI_DISABLE_ROM(rom->rom_softc);

	if ((error = uvm_map_protect(kernel_map, rom->rom_code,
	    rom->rom_code + round_page(size), UVM_PROT_RX, FALSE))) {
		printf(": uvm_map_protect failed (%d)\n", error);
		km_free((void *)rom->rom_code, round_page(size), &kv_any,
		    &kp_dirty);
		return (error);
	}

	/*
	 * Setup code function pointers.
	 */

#define	O(i) \
	(dd->dd_pacode[(i)] == 0 ? 0 : \
	    (rom->rom_code + (dd->dd_pacode[(i)] - dd->dd_pacode[0]) / \
	      (rom->rom_devtype == STI_DEVTYPE1? 4 : 1)))

	rom->init	= (sti_init_t)	O(STI_INIT_GRAPH);
	rom->mgmt	= (sti_mgmt_t)	O(STI_STATE_MGMT);
	rom->unpmv	= (sti_unpmv_t)	O(STI_FONT_UNPMV);
	rom->blkmv	= (sti_blkmv_t)	O(STI_BLOCK_MOVE);
	rom->test	= (sti_test_t)	O(STI_SELF_TEST);
	rom->exhdl	= (sti_exhdl_t)	O(STI_EXCEP_HDLR);
	rom->inqconf	= (sti_inqconf_t)O(STI_INQ_CONF);
	rom->scment	= (sti_scment_t)O(STI_SCM_ENT);
	rom->dmac	= (sti_dmac_t)	O(STI_DMA_CTRL);
	rom->flowc	= (sti_flowc_t)	O(STI_FLOW_CTRL);
	rom->utiming	= (sti_utiming_t)O(STI_UTIMING);
	rom->pmgr	= (sti_pmgr_t)	O(STI_PROC_MGR);
	rom->util	= (sti_util_t)	O(STI_UTIL);

#undef	O

	/*
	 * Set colormap entry is not implemented until 8.04, so force
	 * a NULL pointer here.
	 */
	if (dd->dd_grrev < STI_REVISION(8,4)) {
		rom->scment = NULL;
	}

	return (0);
}
Example #18
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

 freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
 clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}
Example #19
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;
	const struct kmem_dyn_mode *kd;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PGF_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #20
int
i386_set_ldt(struct proc *p, void *args, register_t *retval)
{
    int error, i, n;
    struct pcb *pcb = &p->p_addr->u_pcb;
    pmap_t pmap = p->p_vmspace->vm_map.pmap;
    struct i386_set_ldt_args ua;
    union descriptor *descv;
    size_t old_len, new_len, ldt_len;
    union descriptor *old_ldt, *new_ldt;

    if (user_ldt_enable == 0)
        return (ENOSYS);

    if ((error = copyin(args, &ua, sizeof(ua))) != 0)
        return (error);

    if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
            ua.start + ua.num > 8192)
        return (EINVAL);

    descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
    if (descv == NULL)
        return (ENOMEM);

    if ((error = copyin(ua.desc, descv, sizeof (*descv) * ua.num)) != 0)
        goto out;

    /* Check descriptors for access violations. */
    for (i = 0; i < ua.num; i++) {
        union descriptor *desc = &descv[i];

        switch (desc->sd.sd_type) {
        case SDT_SYSNULL:
            desc->sd.sd_p = 0;
            break;
        case SDT_SYS286CGT:
        case SDT_SYS386CGT:
            /*
             * Only allow call gates targeting a segment
             * in the LDT or a user segment in the fixed
             * part of the gdt.  Segments in the LDT are
             * constrained (below) to be user segments.
             */
            if (desc->gd.gd_p != 0 &&
                    !ISLDT(desc->gd.gd_selector) &&
                    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
                     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
                      SEL_UPL))) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMEC:
        case SDT_MEMEAC:
        case SDT_MEMERC:
        case SDT_MEMERAC:
            /* Must be "present" if executable and conforming. */
            if (desc->sd.sd_p == 0) {
                error = EACCES;
                goto out;
            }
            break;
        case SDT_MEMRO:
        case SDT_MEMROA:
        case SDT_MEMRW:
        case SDT_MEMRWA:
        case SDT_MEMROD:
        case SDT_MEMRODA:
        case SDT_MEMRWD:
        case SDT_MEMRWDA:
        case SDT_MEME:
        case SDT_MEMEA:
        case SDT_MEMER:
        case SDT_MEMERA:
            break;
        default:
            /*
             * Make sure that unknown descriptor types are
             * not marked present.
             */
            if (desc->sd.sd_p != 0) {
                error = EACCES;
                goto out;
            }
            break;
        }

        if (desc->sd.sd_p != 0) {
            /* Only user (ring-3) descriptors may be present. */
            if (desc->sd.sd_dpl != SEL_UPL) {
                error = EACCES;
                goto out;
            }
        }
    }

    /* allocate user ldt */
    simple_lock(&pmap->pm_lock);
    if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_len = pmap->pm_ldt_len;
        else
            ldt_len = 512;
        while ((ua.start + ua.num) > ldt_len)
            ldt_len *= 2;
        new_len = ldt_len * sizeof(union descriptor);

        simple_unlock(&pmap->pm_lock);
        new_ldt = km_alloc(round_page(new_len), &kv_any,
                           &kp_dirty, &kd_nowait);
        if (new_ldt == NULL) {
            error = ENOMEM;
            goto out;
        }
        simple_lock(&pmap->pm_lock);

        if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
            /*
             * Another thread (re)allocated the LDT to
             * sufficient size while we were blocked in
             * km_alloc. Oh well. The new entries
             * will quite probably not be right, but
             * hey.. not our problem if user applications
             * have race conditions like that.
             */
            km_free(new_ldt, round_page(new_len), &kv_any,
                    &kp_dirty);
            goto copy;
        }

        old_ldt = pmap->pm_ldt;

        if (old_ldt != NULL) {
            old_len = pmap->pm_ldt_len * sizeof(union descriptor);
        } else {
            old_len = NLDT * sizeof(union descriptor);
            old_ldt = ldt;
        }

        memcpy(new_ldt, old_ldt, old_len);
        memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);

        if (old_ldt != ldt)
            km_free(old_ldt, round_page(old_len),
                    &kv_any, &kp_dirty);

        pmap->pm_ldt = new_ldt;
        pmap->pm_ldt_len = ldt_len;

        if (pmap->pm_flags & PMF_USER_LDT)
            ldt_free(pmap);
        else
            pmap->pm_flags |= PMF_USER_LDT;
        ldt_alloc(pmap, new_ldt, new_len);
        pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
        if (pcb == curpcb)
            lldt(pcb->pcb_ldt_sel);

    }
copy:
    /* Now actually replace the descriptors. */
    for (i = 0, n = ua.start; i < ua.num; i++, n++)
        pmap->pm_ldt[n] = descv[i];

    simple_unlock(&pmap->pm_lock);

    *retval = ua.start;

out:
    free(descv, M_TEMP);
    return (error);
}
Example #21
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = sc->sc_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK|M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(), caa->cpu_number);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_func = caa->cpu_func;

	simple_lock_init(&ci->ci_slock);

#if defined(MULTIPROCESSOR)
	/*
	 * Allocate USPACE pages for the idle PCB and stack.
	 * XXX should we just sleep here?
	 */
	kstack = (vaddr_t)km_alloc(USPACE, &kv_any, &kp_zero, &kd_nowait);
	if (kstack == 0) {
		if (caa->cpu_role != CPU_ROLE_AP) {
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;

	pcb->pcb_kstack = kstack + USPACE - 16;
	pcb->pcb_rbp = pcb->pcb_rsp = kstack + USPACE - 16;
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);

#if NLAPIC > 0
		/*
		 * Enable local apic
		 */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		sched_init_cpu(ci);
		cpu_start_secondary(ci);
		ncpus++;
		if (ci->ci_flags & CPUF_PRESENT) {
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??");
	}
	cpu_vm_init(ci);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    sc->sc_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%lx\n",
		    sc->sc_dev.dv_xname, pcb, pcb->pcb_rsp);
	}
#endif
}
Example #22
int
i80321_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{

	struct i80321_softc *sc = t;
	vaddr_t va;
	uint32_t busbase;
	paddr_t pa, endpa, physbase;
	pt_entry_t *pte;

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x : %x %x \n", bpa, size, flag,
   sc->sc_owin[0].owin_xlate_lo,
   sc->sc_owin[0].owin_xlate_lo+ VERDE_OUT_XLATE_MEM_WIN_SIZE);
#endif

	if (bpa >= sc->sc_owin[0].owin_xlate_lo &&
	    bpa < (sc->sc_owin[0].owin_xlate_lo +
		   VERDE_OUT_XLATE_MEM_WIN_SIZE)) {
		busbase = sc->sc_iwin[1].iwin_xlate;
		physbase = sc->sc_owin[0].owin_xlate_lo;
	} else
		return (EINVAL);

	if ((bpa + size) >= ( sc->sc_owin[0].owin_xlate_lo +
	    VERDE_OUT_XLATE_MEM_WIN_SIZE))
		return (EINVAL);

	/*
	 * Found the window -- PCI MEM space is now mapped by allocating
	 * some kernel VA space and mapping the pages with pmap_enter().
	 * pmap_enter() will map unmanaged pages as non-cacheable.
	 */
	pa = trunc_page((bpa - busbase) + physbase);
	endpa = round_page(((bpa - busbase) + physbase) + size);

	va = (vaddr_t)km_alloc(endpa - pa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);
//printf("i80321_mem_bs_map bpa %x pa %x va %x sz %x\n", bpa, pa, va, endpa-pa);

#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, pa,
    endpa, endpa-pa);
#endif

	*bshp = va + (bpa & PAGE_MASK);

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}