Example 1
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}
	
			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
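
From userland, the CDEV_MINOR_MEM path above is driven entirely by the file offset: uio->uio_offset is the physical address being read. A minimal usage sketch (illustration only; requires sufficient privilege, and 0x1000 is an arbitrary example address):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[64];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* The seek offset becomes uio->uio_offset in memrw(). */
	if (lseek(fd, 0x1000, SEEK_SET) == -1 ||
	    read(fd, buf, sizeof(buf)) == -1)
		perror("read");
	close(fd);
	return (0);
}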
Example 2
/*
 * Write bytes to kernel address space for debugger.
 */
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	jmp_buf jb;
	void *prev_jb;
	char *dst;
	pt_entry_t	*ptep0 = NULL;
	pt_entry_t	oldmap0 = 0;
	vm_offset_t	addr1;
	pt_entry_t	*ptep1 = NULL;
	pt_entry_t	oldmap1 = 0;
	int ret;

	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);
	if (ret == 0) {
		if (addr > trunc_page((vm_offset_t)btext) - size &&
		    addr < round_page((vm_offset_t)etext)) {

			ptep0 = vtopte(addr);
			oldmap0 = *ptep0;
			*ptep0 |= PG_RW;

			/*
			 * Map another page if the data crosses a page
			 * boundary.
			 */
			if ((*ptep0 & PG_PS) == 0) {
				addr1 = trunc_page(addr + size - 1);
				if (trunc_page(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			} else {
				addr1 = trunc_2mpage(addr + size - 1);
				if (trunc_2mpage(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			}

			invltlb();
		}

		dst = (char *)addr;

		while (size-- > 0)
			*dst++ = *data++;
	}

	(void)kdb_jmpbuf(prev_jb);

	if (ptep0) {
		*ptep0 = oldmap0;

		if (ptep1)
			*ptep1 = oldmap1;

		invltlb();
	}

	return (ret);
}
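
The read-side counterpart uses the same kdb_jmpbuf()/setjmp() fault-recovery pattern but needs no page-table fixups, since reading the text segment is always permitted. A sketch of its usual shape (an assumption; details vary by FreeBSD version):

int
db_read_bytes(vm_offset_t addr, size_t size, char *data)
{
	jmp_buf jb;
	void *prev_jb;
	const char *src;
	int ret;

	/* A fault during the copy longjmp()s back here with ret != 0. */
	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);
	if (ret == 0) {
		src = (const char *)addr;
		while (size-- > 0)
			*data++ = *src++;
	}
	(void)kdb_jmpbuf(prev_jb);
	return (ret);
}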
Example 3
vaddr_t
netbsd32_vm_default_addr(struct proc *p, vaddr_t base, vsize_t size)
{
    return round_page((vaddr_t)(base) + (vsize_t)MAXDSIZ32);
}
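
round_page() here just bumps base past MAXDSIZ32 to the next page boundary. The page-rounding macros are conventionally defined along these lines (a sketch, assuming PAGE_SIZE is a power of two):

#define	PAGE_MASK	(PAGE_SIZE - 1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)	/* round up */
#define	trunc_page(x)	((x) & ~PAGE_MASK)			/* round down */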
Example 4
/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
	return (NULL);

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize  = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
	switch (phdr->p_type) {

	case PT_INTERP:
	    phinterp = phdr;
	    break;

	case PT_LOAD:
	    segs[++nsegs] = phdr;
	    if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
		_rtld_error("%s: PT_LOAD segment %d not page-aligned",
		    path, nsegs);
		goto error;
	    }
	    break;

	case PT_PHDR:
	    phdr_vaddr = phdr->p_vaddr;
	    phsize = phdr->p_memsz;
	    break;

	case PT_DYNAMIC:
	    phdyn = phdr;
	    break;

	case PT_TLS:
	    phtls = phdr;
	    break;

	case PT_GNU_STACK:
	    stack_flags = phdr->p_flags;
	    break;

	case PT_GNU_RELRO:
	    relro_page = phdr->p_vaddr;
	    relro_size = phdr->p_memsz;
	    break;

	case PT_NOTE:
	    if (phdr->p_offset > PAGE_SIZE ||
	      phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
		break;
	    note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
	    note_end = note_start + phdr->p_filesz;
	    break;
	}

	++phdr;
    }
    if (phdyn == NULL) {
	_rtld_error("%s: object is not dynamically-linked", path);
	goto error;
    }

    if (nsegs < 0) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;

    mapbase = mmap(base_addr, mapsize, PROT_NONE, MAP_ANON | MAP_PRIVATE |
      MAP_NOCORE, -1, 0);
    if (mapbase == (caddr_t) -1) {
	_rtld_error("%s: mmap of entire address space failed: %s",
	  path, rtld_strerror(errno));
	goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
	_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
	  path, base_addr, mapbase);
	goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
	/* Overlay the segment onto the proper region. */
	data_offset = trunc_page(segs[i]->p_offset);
	data_vaddr = trunc_page(segs[i]->p_vaddr);
	data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
	data_addr = mapbase + (data_vaddr - base_vaddr);
	data_prot = convert_prot(segs[i]->p_flags);
	data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
	if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
	  data_flags | MAP_PREFAULT_READ, fd, data_offset) == (caddr_t) -1) {
	    _rtld_error("%s: mmap of data failed: %s", path,
		rtld_strerror(errno));
	    goto error1;
	}

	/* Do BSS setup */
	if (segs[i]->p_filesz != segs[i]->p_memsz) {

	    /* Clear any BSS in the last page of the segment. */
	    clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
	    clear_addr = mapbase + (clear_vaddr - base_vaddr);
	    clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

	    if ((nclear = data_vlimit - clear_vaddr) > 0) {
		/* Make sure the end of the segment is writable */
		if ((data_prot & PROT_WRITE) == 0 && -1 ==
		     mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
			_rtld_error("%s: mprotect failed: %s", path,
			    rtld_strerror(errno));
			goto error1;
		}

		memset(clear_addr, 0, nclear);

		/* Reset the data protection back */
		if ((data_prot & PROT_WRITE) == 0)
		    mprotect(clear_page, PAGE_SIZE, data_prot);
	    }

	    /* Overlay the BSS segment onto the proper region. */
	    bss_vaddr = data_vlimit;
	    bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
	    bss_addr = mapbase +  (bss_vaddr - base_vaddr);
	    if (bss_vlimit > bss_vaddr) {	/* There is something to do */
		if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
		    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
		    _rtld_error("%s: mmap of bss failed: %s", path,
			rtld_strerror(errno));
		    goto error1;
		}
	    }
	}

	if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
	  (data_vlimit - data_vaddr + data_offset) >=
	  (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
	    phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
	}
    }

    obj = obj_new();
    if (sb != NULL) {
	obj->dev = sb->st_dev;
	obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
	obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
	obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
	obj->phdr = malloc(phsize);
	if (obj->phdr == NULL) {
	    obj_free(obj);
	    _rtld_error("%s: cannot allocate program header", path);
	    goto error1;
	}
	memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
	obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
	obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
	tls_dtv_generation++;
	obj->tlsindex = ++tls_max_index;
	obj->tlssize = phtls->p_memsz;
	obj->tlsalign = phtls->p_align;
	obj->tlsinitsize = phtls->p_filesz;
	obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);
    if (note_start < note_end)
	digest_notes(obj, note_start, note_end);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}
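
convert_prot() and convert_flags(), called when mapping each PT_LOAD segment above, are not shown; they translate ELF p_flags bits into mmap() protection and mapping flags. A minimal sketch of the protection half (an assumption about its shape, not the verbatim rtld source):

static int
convert_prot(int elfflags)
{
	int prot = 0;

	if (elfflags & PF_R)
		prot |= PROT_READ;
	if (elfflags & PF_W)
		prot |= PROT_WRITE;
	if (elfflags & PF_X)
		prot |= PROT_EXEC;
	return (prot);
}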
Example 5
static int
am335x_lcd_attach(device_t dev)
{
	struct am335x_lcd_softc *sc;
	int rid;
	int div;
	struct panel_info panel;
	uint32_t reg, timing0, timing1, timing2;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint32_t burst_log;
	int err;
	size_t dma_size;
	uint32_t hbp, hfp, hsw;
	uint32_t vbp, vfp, vsw;
	uint32_t width, height;
	phandle_t root, panel_node;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	root = OF_finddevice("/");
	if (root == 0) {
		device_printf(dev, "failed to get FDT root node\n");
		return (ENXIO);
	}

	panel_node = fdt_find_compatible(root, "ti,tilcdc,panel", 1);
	if (panel_node == 0) {
		device_printf(dev, "failed to find compatible panel in FDT blob\n");
		return (ENXIO);
	}

	if (am335x_read_panel_info(dev, panel_node, &panel)) {
		device_printf(dev, "failed to read panel info\n");
		return (ENXIO);
	}

	if (am335x_read_timing(dev, panel_node, &panel)) {
		device_printf(dev, "failed to read timings\n");
		return (ENXIO);
	}

	int ref_freq = 0;
	ti_prcm_clk_enable(LCDC_CLK);
	if (ti_prcm_clk_get_source_freq(LCDC_CLK, &ref_freq)) {
		device_printf(dev, "Can't get reference frequency\n");
		return (ENXIO);
	}

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		return (ENXIO);
	}

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
		device_printf(dev, "cannot allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
			NULL, am335x_lcd_intr, sc,
			&sc->sc_intr_hl) != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, rid,
		    sc->sc_irq_res);
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    sc->sc_mem_res);
		device_printf(dev, "Unable to setup the irq handler.\n");
		return (ENXIO);
	}

	LCD_LOCK_INIT(sc);

	/* Panel initialization */
	dma_size = round_page(panel.panel_width*panel.panel_height*panel.bpp/8);

	/*
	 * Now allocate framebuffer memory
	 */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    4, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    dma_size, 1,			/* maxsize, nsegments */
	    dma_size, 0,			/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_dma_tag);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(sc->sc_dma_tag, (void **)&sc->sc_fb_base,
	    BUS_DMA_COHERENT, &sc->sc_dma_map);

	if (err) {
		device_printf(dev, "cannot allocate framebuffer\n");
		goto fail;
	}

	err = bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, sc->sc_fb_base,
	    dma_size, am335x_fb_dmamap_cb, &sc->sc_fb_phys, BUS_DMA_NOWAIT);

	if (err) {
		device_printf(dev, "cannot load DMA map\n");
		goto fail;
	}

	/* Make sure it's blank */
	memset(sc->sc_fb_base, 0x00, dma_size);

	/* Calculate actual FB Size */
	sc->sc_fb_size = panel.panel_width*panel.panel_height*panel.bpp/8;

	/* Only raster mode is supported */
	reg = CTRL_RASTER_MODE;
	div = am335x_lcd_calc_divisor(ref_freq, panel.panel_pxl_clk);
	reg |= (div << CTRL_DIV_SHIFT);
	LCD_WRITE4(sc, LCD_CTRL, reg); 

	/* Set timing */
	timing0 = timing1 = timing2 = 0;

	hbp = panel.panel_hbp - 1;
	hfp = panel.panel_hfp - 1;
	hsw = panel.panel_hsw - 1;

	vbp = panel.panel_vbp;
	vfp = panel.panel_vfp;
	vsw = panel.panel_vsw - 1;

	height = panel.panel_height - 1;
	width = panel.panel_width - 1;

	/* Horizontal back porch */
	timing0 |= (hbp & 0xff) << RASTER_TIMING_0_HBP_SHIFT;
	timing2 |= ((hbp >> 8) & 3) << RASTER_TIMING_2_HBPHI_SHIFT;
	/* Horizontal front porch */
	timing0 |= (hfp & 0xff) << RASTER_TIMING_0_HFP_SHIFT;
	timing2 |= ((hfp >> 8) & 3) << RASTER_TIMING_2_HFPHI_SHIFT;
	/* Horizontal sync width */
	timing0 |= (hsw & 0x3f) << RASTER_TIMING_0_HSW_SHIFT;
	timing2 |= ((hsw >> 6) & 0xf) << RASTER_TIMING_2_HSWHI_SHIFT;

	/* Vertical back porch, front porch, sync width */
	timing1 |= (vbp & 0xff) << RASTER_TIMING_1_VBP_SHIFT;
	timing1 |= (vfp & 0xff) << RASTER_TIMING_1_VFP_SHIFT;
	timing1 |= (vsw & 0x3f) << RASTER_TIMING_1_VSW_SHIFT;

	/* Pixels per line */
	timing0 |= ((width >> 10) & 1)
	    << RASTER_TIMING_0_PPLMSB_SHIFT;
	timing0 |= ((width >> 4) & 0x3f)
	    << RASTER_TIMING_0_PPLLSB_SHIFT;

	/* Lines per panel */
	timing1 |= (height & 0x3ff) 
	    << RASTER_TIMING_1_LPP_SHIFT;
	timing2 |= ((height >> 10 ) & 1) 
	    << RASTER_TIMING_2_LPP_B10_SHIFT;

	/* clock signal settings */
	if (panel.sync_ctrl)
		timing2 |= RASTER_TIMING_2_PHSVS;
	if (panel.sync_edge)
		timing2 |= RASTER_TIMING_2_PHSVS_RISE;
	else
		timing2 |= RASTER_TIMING_2_PHSVS_FALL;
	if (panel.hsync_active == 0)
		timing2 |= RASTER_TIMING_2_IHS;
	if (panel.vsync_active == 0)
		timing2 |= RASTER_TIMING_2_IVS;
	if (panel.pixelclk_active == 0)
		timing2 |= RASTER_TIMING_2_IPC;

	/* AC bias */
	timing2 |= (panel.ac_bias << RASTER_TIMING_2_ACB_SHIFT);
	timing2 |= (panel.ac_bias_intrpt << RASTER_TIMING_2_ACBI_SHIFT);

	LCD_WRITE4(sc, LCD_RASTER_TIMING_0, timing0); 
	LCD_WRITE4(sc, LCD_RASTER_TIMING_1, timing1); 
	LCD_WRITE4(sc, LCD_RASTER_TIMING_2, timing2); 

	/* DMA settings */
	reg = LCDDMA_CTRL_FB0_FB1;
	/* Find power of 2 for current burst size */
	switch (panel.dma_burst_sz) {
	case 1:
		burst_log = 0;
		break;
	case 2:
		burst_log = 1;
		break;
	case 4:
		burst_log = 2;
		break;
	case 8:
		burst_log = 3;
		break;
	case 16:
	default:
		burst_log = 4;
		break;
	}
	reg |= (burst_log << LCDDMA_CTRL_BURST_SIZE_SHIFT);
	/* XXX: FIFO TH */
	reg |= (0 << LCDDMA_CTRL_TH_FIFO_RDY_SHIFT);
	LCD_WRITE4(sc, LCD_LCDDMA_CTRL, reg); 

	LCD_WRITE4(sc, LCD_LCDDMA_FB0_BASE, sc->sc_fb_phys); 
	LCD_WRITE4(sc, LCD_LCDDMA_FB0_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); 
	LCD_WRITE4(sc, LCD_LCDDMA_FB1_BASE, sc->sc_fb_phys); 
	LCD_WRITE4(sc, LCD_LCDDMA_FB1_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); 

	/* Enable LCD */
	reg = RASTER_CTRL_LCDTFT;
	reg |= (panel.fdd << RASTER_CTRL_REQDLY_SHIFT);
	reg |= (PALETTE_DATA_ONLY << RASTER_CTRL_PALMODE_SHIFT);
	if (panel.bpp >= 24)
		reg |= RASTER_CTRL_TFT24;
	if (panel.bpp == 32)
		reg |= RASTER_CTRL_TFT24_UNPACKED;
	LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); 

	LCD_WRITE4(sc, LCD_CLKC_ENABLE,
	    CLKC_ENABLE_DMA | CLKC_ENABLE_LDID | CLKC_ENABLE_CORE);

	LCD_WRITE4(sc, LCD_CLKC_RESET, CLKC_RESET_MAIN);
	DELAY(100);
	LCD_WRITE4(sc, LCD_CLKC_RESET, 0);

	reg = IRQ_EOF1 | IRQ_EOF0 | IRQ_FUF | IRQ_PL |
	    IRQ_ACB | IRQ_SYNC_LOST |  IRQ_RASTER_DONE |
	    IRQ_FRAME_DONE;
	LCD_WRITE4(sc, LCD_IRQENABLE_SET, reg);

	reg = LCD_READ4(sc, LCD_RASTER_CTRL);
 	reg |= RASTER_CTRL_LCDEN;
	LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); 

	LCD_WRITE4(sc, LCD_SYSCONFIG,
	    SYSCONFIG_STANDBY_SMART | SYSCONFIG_IDLE_SMART); 

	/* Init backlight interface */
	ctx = device_get_sysctl_ctx(sc->sc_dev);
	tree = device_get_sysctl_tree(sc->sc_dev);
	sc->sc_oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "backlight", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    am335x_lcd_sysctl_backlight, "I", "LCD backlight");
	sc->sc_backlight = 0;
	/* Check if eCAP interface is available at this point */
	if (am335x_pwm_config_ecap(PWM_UNIT,
	    PWM_PERIOD, PWM_PERIOD) == 0)
		sc->sc_backlight = 100;

	sc->sc_fb_info.fb_name = device_get_nameunit(sc->sc_dev);
	sc->sc_fb_info.fb_vbase = (intptr_t)sc->sc_fb_base;
	sc->sc_fb_info.fb_pbase = sc->sc_fb_phys;
	sc->sc_fb_info.fb_size = sc->sc_fb_size;
	sc->sc_fb_info.fb_bpp = sc->sc_fb_info.fb_depth = panel.bpp;
	sc->sc_fb_info.fb_stride = panel.panel_width*panel.bpp / 8;
	sc->sc_fb_info.fb_width = panel.panel_width;
	sc->sc_fb_info.fb_height = panel.panel_height;

#ifdef	DEV_SC
	err = (sc_attach_unit(device_get_unit(dev),
	    device_get_flags(dev) | SC_AUTODETECT_KBD));

	if (err) {
		device_printf(dev, "failed to attach syscons\n");
		goto fail;
	}

	am335x_lcd_syscons_setup((vm_offset_t)sc->sc_fb_base, sc->sc_fb_phys, &panel);
#else /* VT */
	device_t fbd = device_add_child(dev, "fbd",
	    device_get_unit(dev));
	if (fbd == NULL) {
		device_printf(dev, "Failed to add fbd child\n");
		goto fail;
	}
	if (device_probe_and_attach(fbd) != 0) {
		device_printf(dev, "Failed to attach fbd device\n");
		goto fail;
	}
#endif

	return (0);

fail:
	return (err);
}
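
am335x_lcd_calc_divisor(), used above to program CTRL_DIV_SHIFT, is not shown; it selects the smallest raster-clock divider that brings the LCDC reference clock down to at most the panel pixel clock. A plausible sketch (an assumption; the shipped driver may clamp differently):

static int
am335x_lcd_calc_divisor(uint32_t reference, uint32_t freq)
{
	int div;

	/* Raster-mode clock dividers range from 2 to 255. */
	for (div = 2; div < 255; div++)
		if (reference / div <= freq)
			return (div);

	return (255);
}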
Example 6
int
do_posix_fadvise(int fd, off_t offset, off_t len, int advice)
{
	file_t *fp;
	vnode_t *vp;
	off_t endoffset;
	int error;

	CTASSERT(POSIX_FADV_NORMAL == UVM_ADV_NORMAL);
	CTASSERT(POSIX_FADV_RANDOM == UVM_ADV_RANDOM);
	CTASSERT(POSIX_FADV_SEQUENTIAL == UVM_ADV_SEQUENTIAL);

	if (len == 0) {
		endoffset = INT64_MAX;
	} else if (len > 0 && (INT64_MAX - offset) >= len) {
		endoffset = offset + len;
	} else {
		return EINVAL;
	}
	if ((fp = fd_getfile(fd)) == NULL) {
		return EBADF;
	}
	if (fp->f_type != DTYPE_VNODE) {
		if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
			error = ESPIPE;
		} else {
			error = EOPNOTSUPP;
		}
		fd_putfile(fd);
		return error;
	}

	switch (advice) {
	case POSIX_FADV_WILLNEED:
	case POSIX_FADV_DONTNEED:
		vp = fp->f_vnode;
		if (vp->v_type != VREG && vp->v_type != VBLK) {
			fd_putfile(fd);
			return 0;
		}
		break;
	}

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_RANDOM:
	case POSIX_FADV_SEQUENTIAL:
		/*
		 * We ignore offset and size.  Must lock the file to
		 * do this, as f_advice is sub-word sized.
		 */
		mutex_enter(&fp->f_lock);
		fp->f_advice = (u_char)advice;
		mutex_exit(&fp->f_lock);
		error = 0;
		break;

	case POSIX_FADV_WILLNEED:
		vp = fp->f_vnode;
		error = uvm_readahead(&vp->v_uobj, offset, endoffset - offset);
		break;

	case POSIX_FADV_DONTNEED:
		vp = fp->f_vnode;
		/*
		 * Align the region to page boundaries as VOP_PUTPAGES expects
		 * by shrinking it.  We shrink instead of expand because we
		 * do not want to deactivate cache outside of the requested
		 * region.  It means that if the specified region is smaller
		 * than PAGE_SIZE, we do nothing.
		 */
		if (round_page(offset) < trunc_page(endoffset) &&
		    offset <= round_page(offset)) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    round_page(offset), trunc_page(endoffset),
			    PGO_DEACTIVATE | PGO_CLEANIT);
		} else {
			error = 0;
		}
		break;

	case POSIX_FADV_NOREUSE:
		/* Not implemented yet. */
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	fd_putfile(fd);
	return error;
}
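
Userland reaches this handler through posix_fadvise(2). Note that the call returns an error number directly rather than setting errno. A small usage example (illustration only; the path is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int error;
	int fd = open("/var/log/messages", O_RDONLY);

	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* len == 0 means "through the end of the file" in the handler. */
	error = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
	if (error != 0)
		fprintf(stderr, "posix_fadvise: %s\n", strerror(error));
	close(fd);
	return (0);
}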
Example 7
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
	int loop;
	int loop1;
	u_int l1pagetable;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif

	/* Register devmap for devices we mapped in start */
	pmap_devmap_register(viper_devmap);

	/* start 32.768 kHz OSC */
	ioreg_write(VIPER_CLKMAN_VBASE + 0x08, 2);
	/* Get ready for splfoo() */
	pxa2x0_intr_bootstrap(VIPER_INTCTL_VBASE);

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

#if 0
	/* Calibrate the delay loop. */
#endif

	/* setup GPIO for BTUART, in case bootloader doesn't take care of it */
	pxa2x0_gpio_bootstrap(VIPER_GPIO_VBASE);
	pxa2x0_gpio_config(viper_gpioconf);

	/* turn on clock to UART block.
	   XXX: this should not be done here. */
	ioreg_write(VIPER_CLKMAN_VBASE+CLKMAN_CKEN, CKEN_FFUART|CKEN_BTUART |
	    ioreg_read(VIPER_CLKMAN_VBASE+CLKMAN_CKEN));

	consinit();
#ifdef KGDB
	kgdb_port_init();
#endif
	/* Talk to the user */
	printf("\nNetBSD/evbarm (viper) booting ...\n");

#if 0
	/*
	 * Examine the boot args string for options we need to know about
	 * now.
	 */
	process_kernel_args((char *)nwbootinfo.bt_args);
#endif

	printf("initarm: Configuring system ...\n");

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = MEMSTART;
	bootconfig.dram[0].pages = MEMSIZE / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the page tables that RedBoot
	 * set up, we will panic.  We will update physical_freestart
	 * and physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 * (now that would be truly hardcore XXX)
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never happen, but verify it anyway. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
#if 1
	/* MULTI-ICE requires that page 0 is NC/NB so that it can download the
	 * cache-clean code there.  */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif

	/*
	 * map integrated peripherals at same address in l1pagetable
	 * so that we can continue to use console.
	 */
	pmap_devmap_bootstrap(l1pagetable, viper_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Load memory into UVM. */
	printf("page ");
	uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
	printf("pmap ");
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
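
A side note on the allocation macros above: alloc_pages() expands to several statements, so an unbraced `if (cond) alloc_pages(...)` would guard only the first one. The conventional hardening (a sketch, not the NetBSD source) wraps the body in do { } while (0):

#define	alloc_pages(var, np)					\
	do {							\
		physical_freeend -= ((np) * PAGE_SIZE);		\
		if (physical_freeend < physical_freestart)	\
			panic("initarm: out of memory");	\
		(var) = physical_freeend;			\
		free_pages -= (np);				\
		memset((char *)(var), 0, ((np) * PAGE_SIZE));	\
	} while (0)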
Example 8
File: mem.c Project: coyizumi/cs111
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	pmap_page_init(&m);
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */
			if (va >= VM_MIN_KERNEL_ADDRESS &&
			    eva <= VM_MAX_KERNEL_ADDRESS) {
				for (; va < eva; va += PAGE_SIZE)
					if (pmap_extract(kernel_pmap, va) == 0)
						return (EFAULT);

				prot = (uio->uio_rw == UIO_READ)
				    ? VM_PROT_READ : VM_PROT_WRITE;

				va = uio->uio_offset;
				if (kernacc((void *) va, iov->iov_len, prot)
				    == FALSE)
					return (EFAULT);
			}

			va = uio->uio_offset;
			error = uiomove((void *)va, iov->iov_len, uio);
			continue;
		}
	}

	return (error);
}
Example 9
void *
initarm(struct arm_boot_params *abp)
{
    struct pv_addr	kernel_l1pt;
    int loop;
    u_int l1pagetable;
    vm_offset_t freemempos;
    vm_offset_t afterkern;
    vm_offset_t lastaddr;

    int i;
    uint32_t memsize;

    boothowto = 0;  /* Likely not needed */
    lastaddr = parse_boot_param(abp);
    arm_physmem_kernaddr = abp->abp_physaddr;
    i = 0;
    set_cpufuncs();
    cpufuncs.cf_sleep = s3c24x0_sleep;

    pcpu0_init();

    /* Do basic tuning, hz etc */
    init_param1();

#define KERNEL_TEXT_BASE (KERNBASE)
    freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
    /* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np * PAGE_SIZE);		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

    while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
        freemempos += PAGE_SIZE;
    valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
    for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
        if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
            valloc_pages(kernel_pt_table[loop],
                         L2_TABLE_SIZE / PAGE_SIZE);
        } else {
            kernel_pt_table[loop].pv_va = freemempos -
                                          (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
                                          L2_TABLE_SIZE_REAL;
            kernel_pt_table[loop].pv_pa =
                kernel_pt_table[loop].pv_va - KERNVIRTADDR +
                abp->abp_physaddr;
        }
    }
    /*
     * Allocate a page for the system page mapped to V0x00000000
     * This page will just contain the system vectors and can be
     * shared by all processes.
     */
    valloc_pages(systempage, 1);

    /* Allocate stacks for all modes */
    valloc_pages(irqstack, IRQ_STACK_SIZE);
    valloc_pages(abtstack, ABT_STACK_SIZE);
    valloc_pages(undstack, UND_STACK_SIZE);
    valloc_pages(kernelstack, KSTACK_PAGES);
    valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
    /*
     * Now we start construction of the L1 page table
     * We start by mapping the L2 page tables into the L1.
     * This means that we can replace L1 mappings later on if necessary
     */
    l1pagetable = kernel_l1pt.pv_va;

    /* Map the L2 pages tables in the L1 page table */
    pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
                   &kernel_pt_table[KERNEL_PT_SYS]);
    for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
        pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
                       &kernel_pt_table[KERNEL_PT_KERN + i]);
    pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
                   (((uint32_t)(lastaddr) - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
                           - 1));
    for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
        pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
                       &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
    }

    /* Map the vector page. */
    pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    /* Map the stack pages */
    pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
                   IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
                   ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
                   UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
                   KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

    pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
                   L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
    pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
                   msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);


    for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
        pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
                       kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
                       VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
    }

    arm_devmap_bootstrap(l1pagetable, s3c24x0_devmap);

    cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
    setttb(kernel_l1pt.pv_pa);
    cpu_tlb_flushID();
    cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

    /*
     * Pages were allocated during the secondary bootstrap for the
     * stacks for different CPU modes.
     * We must now set the r13 registers in the different CPU modes to
     * point to these stacks.
     * Since the ARM stacks use STMFD etc. we must set r13 to the top end
     * of the stack memory.
     */

    cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
    set_stackptrs(0);

    /*
     * We must now clean the cache again....
     * Cleaning may be done by reading new data to displace any
     * dirty data in the cache. This will have happened in setttb()
     * but since we are boot strapping the addresses used for the read
     * may have just been remapped and thus the cache could be out
     * of sync. A re-clean after the switch will cure this.
     * After booting there are no gross relocations of the kernel, thus
     * this problem will not occur after initarm().
     */
    cpu_idcache_wbinv_all();
    cpu_setup("");

    /* Disable all peripheral interrupts */
    ioreg_write32(S3C24X0_INTCTL_BASE + INTCTL_INTMSK, ~0);
    memsize = board_init();
    /* Find pclk for uart */
    switch(ioreg_read32(S3C24X0_GPIO_BASE + GPIO_GSTATUS1) >> 16) {
    case 0x3241:
        s3c2410_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
                            &s3c2410_pclk);
        break;
    case 0x3244:
        s3c2440_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
                            &s3c2410_pclk);
        break;
    }
    cninit();

    undefined_init();

    init_proc0(kernelstack.pv_va);

    arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

    pmap_curmaxkvaddr = afterkern + 0x100000 * (KERNEL_PT_KERN_NUM - 1);
    vm_max_kernel_address = KERNVIRTADDR + 3 * memsize;
    pmap_bootstrap(freemempos, &kernel_l1pt);
    msgbufp = (void*)msgbufpv.pv_va;
    msgbufinit(msgbufp, msgbufsize);
    mutex_init();

    /*
     * Add the physical ram we have available.
     *
     * Exclude the kernel, and all the things we allocated which immediately
     * follow the kernel, from the VM allocation pool but not from crash
     * dumps.  virtual_avail is a global variable which tracks the kva we've
     * "allocated" while setting up pmaps.
     *
     * Prepare the list of physical memory available to the vm subsystem.
     */
    arm_physmem_hardware_region(PHYSADDR, memsize);
    arm_physmem_exclude_region(abp->abp_physaddr,
                               virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
    arm_physmem_init_kernel_globals();

    init_param2(physmem);
    kdb_init();

    return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
                     sizeof(struct pcb)));
}
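
The L2 allocation loop above packs four 1 KB page tables (L2_TABLE_SIZE_REAL) into each 4 KB page, allocating a fresh page only for every fourth table. A hypothetical helper restating the address arithmetic (illustration only, not part of the source):

/*
 * VA of the loop-th L2 table, assuming tables are packed four per page
 * and freemempos points just past the most recently allocated page.
 */
static vm_offset_t
l2_table_va(vm_offset_t freemempos, int loop)
{
	int slot = loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL);

	return (freemempos - slot * L2_TABLE_SIZE_REAL);
}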
Example 10
void* AllocateExecutableMemory(size_t size, bool low)
{
#if defined(_WIN32)
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
#elif defined(__SYMBIAN32__)
	// On Symbian, we will need to create an RChunk and allocate with ->CreateLocalCode(size, size);
	static char *map_hint = 0;
	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, -1, 0);
#else
	static char *map_hint = 0;
#if defined(__x86_64__) && !defined(MAP_32BIT)
	// This OS has no flag to enforce allocation below the 4 GB boundary,
	// but if we hint that we want a low address it is very likely we will
	// get one.
	// An older version of this code used MAP_FIXED, but that has the side
	// effect of discarding already mapped pages that happen to be in the
	// requested virtual memory range (such as the emulated RAM, sometimes).
	if (low && (!map_hint))
		map_hint = (char*)round_page(512*1024*1024); /* 0.5 GB rounded up to the next page */
#endif
	void* ptr = mmap(map_hint, size, PROT_READ | PROT_WRITE | PROT_EXEC,
		MAP_ANON | MAP_PRIVATE
#if defined(__x86_64__) && defined(MAP_32BIT)
		| (low ? MAP_32BIT : 0)
#endif
		, -1, 0);
#endif /* defined(_WIN32) */

	// printf("Mapped executable memory at %p (size %ld)\n", ptr,
	//	(unsigned long)size);

#if defined(__FreeBSD__)
	if (ptr == MAP_FAILED)
	{
		ptr = NULL;
#else
	if (ptr == NULL)
	{
#endif
		PanicAlert("Failed to allocate executable memory");
	}
#if !defined(_WIN32) && defined(__x86_64__) && !defined(MAP_32BIT)
	else
	{
		if (low)
		{
			map_hint += size;
			map_hint = (char*)round_page(map_hint); /* round up to the next page */
			// printf("Next map will (hopefully) be at %p\n", map_hint);
		}
	}
#endif

#if defined(_M_X64)
	if ((u64)ptr >= 0x80000000 && low == true)
		PanicAlert("Executable memory ended up above 2GB!");
#endif

	return ptr;
}

void* AllocateMemoryPages(size_t size)
{
#ifdef _WIN32
	void* ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
#else
	void* ptr = mmap(0, size, PROT_READ | PROT_WRITE,
#ifndef __SYMBIAN32__
		MAP_ANON |
#endif
		MAP_PRIVATE, -1, 0);
#endif

	// printf("Mapped memory at %p (size %ld)\n", ptr,
	//	(unsigned long)size);

	if (ptr == NULL)
		PanicAlert("Failed to allocate raw memory");

	return ptr;
}
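
A typical consumer of AllocateExecutableMemory() copies machine code into the region and jumps to it. A minimal x86-64 sketch (illustration only; assumes the platform still permits RWX mappings):

#include <string.h>

typedef int (*fn_t)(void);

int RunTinyJit(void)
{
	// mov eax, 42 ; ret   (x86-64 machine code)
	static const unsigned char code[] = {
		0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3
	};
	void* mem = AllocateExecutableMemory(4096, true);

	memcpy(mem, code, sizeof(code));
	return ((fn_t)mem)();	// returns 42
}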
Example 11
      out_buf_offs += nwrite;
    }

  void zreaderr (void)
    {
      zerr = EIO;
      longjmp (zerr_jmp_buf, 1);
    }
  void zerror (const char *msg)
    {
      zerr = EINVAL;
      longjmp (zerr_jmp_buf, 2);
    }

  /* Try to guess a reasonable output buffer size.  */
  *buf_len = round_page (from->f_size * 2);
  zerr = vm_allocate (mach_task_self (), (vm_address_t *)buf, *buf_len, 1);
  if (zerr)
    return zerr;

  mutex_lock (&unzip_lock);

  unzip_read = zread;
  unzip_write = zwrite;
  unzip_read_error = zreaderr;
  unzip_error = zerror;

  if (! setjmp (zerr_jmp_buf))
    {
      if (get_method (0) != 0)
	/* Not a happy gzip file.  */
Example 12
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		curaddr = (*t->_pa_to_device)(curaddr);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
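
The segment-building loop above folds physically contiguous pages into single DMA segments. The same logic in isolation, over a sorted array of page addresses (a standalone illustration, not driver code):

/* Collapse sorted, page-aligned addresses into (addr, len) segments;
   returns the number of segments written to segs[]. */
static int
coalesce_pages(const paddr_t *pages, int npages, bus_dma_segment_t *segs)
{
	int curseg = 0, i;

	segs[0].ds_addr = pages[0];
	segs[0].ds_len = PAGE_SIZE;
	for (i = 1; i < npages; i++) {
		if (pages[i] == pages[i - 1] + PAGE_SIZE) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			segs[curseg].ds_addr = pages[i];
			segs[curseg].ds_len = PAGE_SIZE;
		}
	}
	return (curseg + 1);
}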
Example 13
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did 
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached. 
			 * If there are no multiple mappings of that 
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE)) 
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
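
The unmap path mirrors the single-segment fast path above: direct-mapped XKPHYS addresses were never entered into kernel_map, so there is nothing to release for them. A sketch of the counterpart (an assumption about its shape; the real function may differ):

void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	/* XKPHYS addresses came from the direct map, not kernel_map. */
	if (IS_XKPHYS((vaddr_t)kva))
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}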
Example 14
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t eva;
	vm_offset_t off;
	vm_offset_t ova;
	vm_offset_t va;
	vm_prot_t prot;
	vm_paddr_t pa;
	vm_size_t cnt;
	vm_page_t m;
	int error;
	int i;
	uint32_t colors;

	cnt = 0;
	colors = 1;
	error = 0;
	ova = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset & ~PAGE_MASK;
			if (!is_physical_memory(pa)) {
				error = EFAULT;
				break;
			}

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = ulmin(cnt, PAGE_SIZE - off);
			cnt = ulmin(cnt, iov->iov_len);

			m = NULL;
			for (i = 0; phys_avail[i] != 0; i += 2) {
				if (pa >= phys_avail[i] &&
				    pa < phys_avail[i + 1]) {
					m = PHYS_TO_VM_PAGE(pa);
					break;
				}
			}

			if (m != NULL) {
				if (ova == 0) {
					if (dcache_color_ignore == 0)
						colors = DCACHE_COLORS;
					ova = kmem_alloc_wait(kernel_map,
					    PAGE_SIZE * colors);
				}
				if (colors != 1 && m->md.color != -1)
					va = ova + m->md.color * PAGE_SIZE;
				else
					va = ova;
				pmap_qenter(va, &m, 1);
				error = uiomove((void *)(va + off), cnt,
				    uio);
				pmap_qremove(va, 1);
			} else {
				va = TLB_PHYS_TO_DIRECT(pa);
				error = uiomove((void *)(va + off), cnt,
				    uio);
			}
			break;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset + iov->iov_len);

			/*
			 * Make sure that all of the pages are currently
			 * resident so we don't create any zero fill pages.
			 */
			for (; va < eva; va += PAGE_SIZE)
				if (pmap_kextract(va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
			    VM_PROT_WRITE;
			va = uio->uio_offset;
			if (va < VM_MIN_DIRECT_ADDRESS &&
			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);
			break;
		}
		/* else panic! */
	}
	if (ova != 0)
		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * colors);
	return (error);
}
Example 15
/*
 * mincore system call handler
 *
 * mincore_args(const void *addr, size_t len, char *vec)
 *
 * No requirements
 */
int
sys_mincore(struct mincore_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (end < addr)
		return (EINVAL);
	if (VM_MAX_USER_ADDRESS > 0 && end > VM_MAX_USER_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = vmspace_pmap(p->p_vmspace);

	lwkt_gettoken(&map->token);
	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current processes address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for(current = entry;
		(current != &map->header) && (current->start < end);
		current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if (current->maptype != VM_MAPTYPE_NORMAL &&
		    current->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}
		if (current->object.vm_object == NULL)
			continue;
		
		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 *
			 * If we have to check the VM object, only mess
			 * around with normal maps.  Do not mess around
			 * with virtual page tables (XXX).
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (mincoreinfo == 0 &&
			    current->maptype == VM_MAPTYPE_NORMAL) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);

				/*
				 * if the page is resident, then gather 
				 * information about it.  spl protection is
				 * required to maintain the object 
				 * association.  And XXX what if the page is
				 * busy?  What's the deal with that?
				 *
				 * XXX vm_token - legacy for pmap_ts_referenced
				 *     in i386 and vkernel pmap code.
				 */
				lwkt_gettoken(&vm_token);
				vm_object_hold(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
						    pindex);
				if (m && m->valid) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
						pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
						pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_object_drop(current->object.vm_object);
				lwkt_reltoken(&vm_token);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while((lastvecindex + 1) < vecindex) {
				error = subyte( vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte( vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while((lastvecindex + 1) < vecindex) {
		error = subyte( vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done;
		}
		++lastvecindex;
	}
	
	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);

	error = 0;
done:
	lwkt_reltoken(&map->token);
	return (error);
}
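
Userland reaches this handler through mincore(2), which returns one status byte per page of the queried range. A small usage example (illustration only; on BSDs the vector bytes carry the MINCORE_* flags set above, with bit 0 indicating residency):

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	size_t len = 16 * pg;
	char *vec = malloc(16);		/* one byte per page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED || vec == NULL)
		return (1);
	((char *)p)[0] = 1;		/* fault in the first page */
	if (mincore(p, len, vec) == 0)
		printf("page 0 is%s resident\n", (vec[0] & 1) ? "" : " not");
	munmap(p, len);
	free(vec);
	return (0);
}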
Example 16
/*******************************************************************************
 * remove the symbol table and string table from the LINKEDIT segment, leaving *
 * any relocation data within the segment.                                     *
 *******************************************************************************/
boolean_t macho_trim_linkedit(
    u_char  *macho,
    u_long  *amount_trimmed)
{
    boolean_t result = FALSE;
    struct mach_header *mach_hdr;
    struct mach_header_64 *mach_hdr64;
    u_char *src, *dst;
    uint32_t ncmds, cmdsize;
    boolean_t swap = FALSE;
    boolean_t is32bit = FALSE;
    u_int i;
    u_char *linkedit_segment = NULL;
    struct symtab_command *symtab = NULL;
    struct dysymtab_command *dysymtab = NULL;

    *amount_trimmed = 0;    /* initialize */

    swap = macho_swap(macho);

    mach_hdr = (struct mach_header *) macho;
    mach_hdr64 = (struct mach_header_64 *) macho;

    /* Find the start of the load commands */

    if (mach_hdr->magic == MH_MAGIC) {
        src = dst = macho + sizeof(*mach_hdr);
        ncmds = mach_hdr->ncmds;
        is32bit = TRUE;
    } else if (mach_hdr->magic == MH_MAGIC_64) {
        src = dst = macho + sizeof(*mach_hdr64);
        ncmds = mach_hdr64->ncmds;
        is32bit = FALSE;
    } else {
        goto finish;
    }

    /* find any LINKEDIT-related load commands */

    for (i = 0; i < ncmds; ++i, src += cmdsize) {
        struct load_command * lc = (struct load_command *) src;
        struct segment_command *seg = (struct segment_command *) src;
        struct segment_command_64 *seg64 = (struct segment_command_64 *) src;

        cmdsize = lc->cmdsize;

        /* First, identify the load commands of interest */
        switch (lc->cmd) {
            case LC_SEGMENT:
                if (!strncmp(seg->segname, SEG_LINKEDIT, sizeof(SEG_LINKEDIT) - 1)) {
                    linkedit_segment = src;
                }
                break;
                
            case LC_SEGMENT_64:
                if (!strncmp(seg64->segname, SEG_LINKEDIT, sizeof(SEG_LINKEDIT) - 1)) {
                    linkedit_segment = src;
                }
                break;
                
            case LC_SYMTAB:
                symtab = (struct symtab_command *) src;
                break;
                
            case LC_DYSYMTAB:
                dysymtab = (struct dysymtab_command *) src;
                break;
        }
    }

    /* was a LINKEDIT segment found? (it damned well better be there!) */
    if (linkedit_segment == NULL)
        goto finish;	/* yowza! */

    /* if no DYSYMTAB command was found, just remove the entire LINKEDIT segment */
    if (dysymtab == NULL) {
        if (swap) macho_unswap(macho);
        return (macho_remove_linkedit(macho, amount_trimmed));
    }
    else {

        /* Calculate size of symbol table (including strings):
         *   # of symbols * sizeof (nlist | nlist_64)...
         *   + size of string table...
         *   aligned to 8-byte boundary
         */
        u_long symtab_size = (((symtab->nsyms
                                * (is32bit ? sizeof(struct nlist) : sizeof(struct nlist_64)))
                               + symtab->strsize) + 7 ) & ~7;

        /* calculate size of relocation entries */
        u_long reloc_size = dysymtab->nlocrel * sizeof(struct relocation_info);

        /* cache old vmsize */
        u_long  old_vmsize = 
            (is32bit
                ? ((struct segment_command *)    linkedit_segment)->vmsize
                : ((struct segment_command_64 *) linkedit_segment)->vmsize);

        /* calculate new segment size after removal of symtab/stringtab data */
        u_long  new_vmsize = round_page(reloc_size);

        /* If the relocation entries are positioned within the LINKEDIT segment AFTER
         * the symbol table, those entries must be moved within the segment. Otherwise,
         * the segment can simply be truncated to remove the symbol table.
         */
        if (symtab->symoff < dysymtab->locreloff) {
            /* move them up within the segment, overwriting the existing symbol table */
            memmove(macho + symtab->symoff, macho + dysymtab->locreloff, reloc_size);
            
            /* update the header field */
            dysymtab->locreloff = symtab->symoff;
            
            /* clear now-unused data within the segment */
            bzero(macho + dysymtab->locreloff + reloc_size, symtab_size);
        }
        else {
            /* symtab/stringtab entries are located after the relocation entries
             * in the segment. Therefore, we just have to truncate the segment
             * appropriately
             */
            bzero(macho + symtab->symoff, symtab_size);	/* wipe any existing data */
        }

        /* update LINKEDIT segment command with new size */
        if (is32bit) {
            ((struct segment_command *) linkedit_segment)->vmsize = 
            ((struct segment_command *) linkedit_segment)->filesize = new_vmsize;
        }
        else {
            ((struct segment_command_64 *) linkedit_segment)->vmsize = 
            ((struct segment_command_64 *) linkedit_segment)->filesize = new_vmsize;
        }

        /* notify caller of # of bytes removed from segment */
        *amount_trimmed = old_vmsize - new_vmsize;
    }

    /* now that the LINKEDIT segment contents have been adjusted properly, we must
     * remove the actual SYMTAB load command from the header
     */
    src = dst;  /* reset for second pass through header */
    for (i = 0; i < ncmds; ++i, src += cmdsize) {
        struct load_command * lc = (struct load_command *) src;

        cmdsize = lc->cmdsize;

        if (lc->cmd == LC_SYMTAB) {
            if (is32bit) {
                mach_hdr->ncmds--;
                mach_hdr->sizeofcmds -= cmdsize;
            } else {
                mach_hdr64->ncmds--;
                mach_hdr64->sizeofcmds -= cmdsize;
            }
            bzero(src, lc->cmdsize);	/* zap the SYMTAB command */
        }
        else {
            /* move remaining load commands up within the header */
            if (dst != src) {
                memmove(dst, src, cmdsize);
            }
            dst += cmdsize;
        }
    }

    result = TRUE;
finish:
    if (swap) macho_unswap(macho);
    return result;
}
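
The symtab_size computation above folds two quantities together: the nlist array plus the string table, rounded up to an 8-byte boundary with the (x + 7) & ~7 idiom. A standalone sketch with illustrative numbers:

/* Sketch of the 8-byte round-up used for symtab_size (values illustrative). */
#include <stdio.h>

#define	ROUND8(x)	(((x) + 7UL) & ~7UL)

int
main(void)
{
	unsigned long nsyms = 100;		/* symbols */
	unsigned long nlist_size = 12;		/* sizeof(struct nlist), 32-bit */
	unsigned long strsize = 1001;		/* string table bytes */

	/* 100 * 12 + 1001 = 2201, rounded up to 2208 */
	printf("symtab_size = %lu\n", ROUND8(nsyms * nlist_size + strsize));
	return (0);
}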
Example n. 17
void
cpu_startup()
{
	caddr_t		v;
	int		sz;
	vaddr_t		minaddr, maxaddr;
	extern unsigned int avail_end;
	extern char	cpu_model[];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s%s [%08X %08X]\n", version, cpu_model, vax_cpudata, vax_siedata);
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	printf("real mem = %u (%uMB)\n", avail_end,
	    avail_end/1024/1024);
	physmem = btoc(avail_end);
	mtpr(AST_NO, PR_ASTLVL);
	spl0();

	/*
	 * Find out how much space we need, allocate it, and then give
	 * everything true virtual addresses.
	 */

	sz = (int) allocsys((caddr_t)0);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (((unsigned long)allocsys(v) - (unsigned long)v) != sz)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) 
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments.  This map effectively limits
	 * the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53
	/*
	 * Allocate a submap for physio.  This map effectively limits the
	 * number of processes doing physio at any one time.
	 *
	 * Note that machines on which all mass storage I/O controllers 
	 * can perform address translation do not need this.
	 */
	if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 ||
	    vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303)
		phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */

	bufinit();
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
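
The buffer-cache sizing above is two clamps in sequence: take bufcachepercent of physical memory, then cap the result at a quarter of the kernel virtual space. A sketch with made-up numbers (64MB of RAM, 5 percent, 4KB pages, 256MB of kernel VA):

/* Sketch of the bufpages clamps above (all values illustrative). */
#include <stdio.h>

int
main(void)
{
	unsigned long pagesz = 4096;
	unsigned long physmem = (64UL << 20) / pagesz;	/* 16384 pages */
	unsigned long kva = 256UL << 20;		/* kernel VA bytes */
	unsigned long bufcachepercent = 5;
	unsigned long bufpages;

	bufpages = physmem * bufcachepercent / 100;	/* 5% of RAM = 819 */
	if (bufpages > kva / pagesz / 4)		/* at most 25% of KVM */
		bufpages = kva / pagesz / 4;
	printf("bufpages = %lu\n", bufpages);		/* -> 819 */
	return (0);
}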
Example n. 18
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size;
#endif
	int loop;
	int loop1;
	u_int l1pagetable;
	paddr_t memstart;
	psize_t memsize;

	/* Calibrate the delay loop. */
	i80321_calibrate_delay();
	i80321_hardclock_hook = NULL;

	/*
	 * Since we map the on-board devices VA==PA, and the kernel
	 * is running VA==PA, it's possible for us to initialize
	 * the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (NPWR_FC) booting ...\n");
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA, except for the
	 * first 64M of RAM is also double-mapped at 0xc0000000.
	 * There is an L1 page table at 0xa0004000.
	 */

	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the L1 table that we set up, we
	 * will panic.  We will update physical_freestart and
	 * physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen, but better confirm it. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef BOOTHOWTO
	boothowto = BOOTHOWTO;
#endif

#if NKSYMS || defined(DDB) || defined(LKM)
	/* Firmware doesn't load symbols. */
	ksyms_init(0, NULL, NULL);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
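
The alloc_pages()/valloc_pages() macros above implement a downward bump allocator, and the page-table loop leans on a counting argument: carving 4KB L2 tables downward must hit a 16KB boundary within at most three steps, at which point the L1 is claimed. A freestanding sketch of that reservation logic (addresses illustrative):

/* Sketch of the downward bootstrap allocator used above (illustrative). */
#include <stdio.h>

#define	L1_TABLE_SIZE	0x4000UL	/* 16KB, needs 16KB alignment */
#define	L2_TABLE_SIZE	0x1000UL	/* 4KB */

int
main(void)
{
	unsigned long freeend = 0xa01ff000UL;	/* grows downward */
	unsigned long l1pt = 0;
	int loop;

	for (loop = 0; loop <= 4; ++loop) {
		if (((freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0 &&
		    l1pt == 0) {
			freeend -= L1_TABLE_SIZE;	/* claim the L1 here */
			l1pt = freeend;
		} else {
			freeend -= L2_TABLE_SIZE;	/* carve an L2 table */
		}
	}
	printf("L1 at 0x%lx, 16KB aligned: %s\n", l1pt,
	    (l1pt & (L1_TABLE_SIZE - 1)) == 0 ? "yes" : "no");
	return (0);
}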
Example n. 19
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * that is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read the commands section into a
 * kernel buffer, and then parse it in order to process the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.  If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode 		*vp,       
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int resid=0;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
		
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;
		
	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in three passes through the headers:
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */
	
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment
		 */ 
		if ((pass == 3) && (result->validentry == 0)) {
			thread_state_initialize(thread);
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 2)
					break;

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   map,
				                   slide,
				                   result);
				break;
			case LC_SEGMENT_64:
				if (pass != 2)
					break;

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   map,
				                   slide,
				                   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
							(char *)addr + mach_header_sz + header->sizeofcmds,
							result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* 
					 * Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					 if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);
					}
					 psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) { 
	    if (! got_code_signatures) {
		    struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    blob = ubc_cs_blob_get(vp, -1, file_offset);
		    if (blob != NULL) {
			    /* get flags to be applied to the process */
			    result->csflags |= blob->csb_flags;
		    }
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
		/*
		 * load the dylinker, and slide it by the independent DYLD ASLR
		 * offset regardless of the PIE-ness of the main binary.
		 */

		ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
		                    dyld_aslr_offset, result);
	    }

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	return(ret);
}
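
The part of the command loop above worth imitating is the prevalidation: the offset must stay monotonic (no overflow), cmdsize must be at least sizeof(struct load_command), and the running offset must stay inside mach_header_sz + sizeofcmds. A minimal hedged sketch of that iteration, detached from the kernel types:

/* Minimal sketch of safe Mach-O load-command iteration; the command
 * area is assumed to be already read into `addr` (types simplified). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct load_command { uint32_t cmd; uint32_t cmdsize; };

static int
walk_commands(const char *addr, size_t header_sz, uint32_t ncmds,
    uint32_t sizeofcmds)
{
	size_t offset = header_sz, oldoffset;

	while (ncmds--) {
		const struct load_command *lcp =
		    (const struct load_command *)(addr + offset);
		oldoffset = offset;
		offset += lcp->cmdsize;
		/* reject overflow, runt commands, and overruns */
		if (oldoffset > offset ||
		    lcp->cmdsize < sizeof(struct load_command) ||
		    offset > header_sz + sizeofcmds)
			return (-1);
		/* switch (lcp->cmd) { ... } would go here */
	}
	return (0);
}

int
main(void)
{
	char buf[32];
	struct load_command lc = { 0, 16 };	/* one benign command */

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &lc, sizeof(lc));
	printf("%s\n", walk_commands(buf, 0, 1, 32) == 0 ? "ok" : "rejected");
	return (0);
}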
Example n. 20
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
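
For context, a driver normally reaches this function through the bus_dma(9) wrappers; a hypothetical fragment (the tag comes from the parent bus, and error handling is abbreviated) pairing allocation with a coherent mapping might look like:

/* Hypothetical driver fragment: allocate and map DMA-safe memory.
 * BUS_DMA_COHERENT is what triggers the uncacheable remapping above. */
#include <sys/param.h>
#include <machine/bus.h>

static int
example_dma_alloc(bus_dma_tag_t tag, bus_size_t size, caddr_t *kvap)
{
	bus_dma_segment_t seg;
	int rseg, error;

	error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(tag, &seg, rseg, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		bus_dmamem_free(tag, &seg, rseg);
	return (error);
}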
Example n. 21
void *
initarm(struct arm_boot_params *abp)
{
    struct pv_addr  kernel_l1pt;
    struct pv_addr  dpcpu;
    int loop;
    u_int l1pagetable;
    vm_offset_t freemempos;
    vm_offset_t freemem_pt;
    vm_offset_t afterkern;
    vm_offset_t freemem_after;
    vm_offset_t lastaddr;
    int i, j;
    uint32_t memsize[PXA2X0_SDRAM_BANKS], memstart[PXA2X0_SDRAM_BANKS];

    lastaddr = parse_boot_param(abp);
    set_cpufuncs();
    pcpu_init(pcpup, 0, sizeof(struct pcpu));
    PCPU_SET(curthread, &thread0);

    /* Do basic tuning, hz etc */
    init_param1();

    freemempos = 0xa0200000;
    /* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa + 0x20000000;

#define alloc_pages(var, np)			\
	freemempos -= (np * PAGE_SIZE);		\
	(var) = freemempos;		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

    while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
        freemempos -= PAGE_SIZE;
    valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
    for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
        if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
            valloc_pages(kernel_pt_table[loop],
                         L2_TABLE_SIZE / PAGE_SIZE);
        } else {
            kernel_pt_table[loop].pv_pa = freemempos +
                                          (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
                                          L2_TABLE_SIZE_REAL;
            kernel_pt_table[loop].pv_va =
                kernel_pt_table[loop].pv_pa + 0x20000000;
        }
    }
    freemem_pt = freemempos;
    freemempos = 0xa0100000;
    /*
     * Allocate a page for the system page mapped to V0x00000000
     * This page will just contain the system vectors and can be
     * shared by all processes.
     */
    valloc_pages(systempage, 1);

    /* Allocate dynamic per-cpu area. */
    valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
    dpcpu_init((void *)dpcpu.pv_va, 0);

    /* Allocate stacks for all modes */
    valloc_pages(irqstack, IRQ_STACK_SIZE);
    valloc_pages(abtstack, ABT_STACK_SIZE);
    valloc_pages(undstack, UND_STACK_SIZE);
    valloc_pages(kernelstack, KSTACK_PAGES);
    alloc_pages(minidataclean.pv_pa, 1);
    valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
#ifdef ARM_USE_SMALL_ALLOC
    freemempos -= PAGE_SIZE;
    freemem_pt = trunc_page(freemem_pt);
    freemem_after = freemempos - ((freemem_pt - 0xa0100000) /
                                  PAGE_SIZE) * sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
                             , (void *)0xc0100000, freemem_pt - 0xa0100000, 1);
    freemem_after -= ((freemem_after - 0xa0001000) / PAGE_SIZE) *
                     sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
                             , (void *)0xc0001000, trunc_page(freemem_after) - 0xa0001000, 0);
    freemempos = trunc_page(freemem_after);
    freemempos -= PAGE_SIZE;
#endif
    /*
     * Allocate memory for the l1 and l2 page tables. The scheme to avoid
     * wasting memory by allocating the l1pt on the first 16k memory was
     * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
     * this to work (which is supposed to be the case).
     */

    /*
     * Now we start construction of the L1 page table
     * We start by mapping the L2 page tables into the L1.
     * This means that we can replace L1 mappings later on if necessary
     */
    l1pagetable = kernel_l1pt.pv_va;

    /* Map the L2 page tables into the L1 page table */
    pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
                   &kernel_pt_table[KERNEL_PT_SYS]);
#if 0 /* XXXBJR: What is this?  Don't know if there's an analogue. */
    pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
                   &kernel_pt_table[KERNEL_PT_IOPXS]);
#endif
    pmap_link_l2pt(l1pagetable, KERNBASE,
                   &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
    pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
                   0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
                   (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
    afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) &
                           ~(L1_S_SIZE - 1));
    for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
        pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
                       &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
    }
    pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef ARM_USE_SMALL_ALLOC
    if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
        arm_add_smallalloc_pages((void *)(freemem_after),
                                 (void*)(freemem_after + PAGE_SIZE),
                                 afterkern - (freemem_after + PAGE_SIZE), 0);
    }
#endif

    /* Map the Mini-Data cache clean area. */
    xscale_setup_minidata(l1pagetable, afterkern,
                          minidataclean.pv_pa);

    /* Map the vector page. */
    pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_devmap_bootstrap(l1pagetable, pxa_devmap);

    /*
     * Give the XScale global cache clean code an appropriately
     * sized chunk of unmapped VA space starting at 0xff000000
     * (our device mappings end before this address).
     */
    xscale_cache_clean_addr = 0xff000000U;

    cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
    setttb(kernel_l1pt.pv_pa);
    cpu_tlb_flushID();
    cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

    /*
     * Pages were allocated during the secondary bootstrap for the
     * stacks for different CPU modes.
     * We must now set the r13 registers in the different CPU modes to
     * point to these stacks.
     * Since the ARM stacks use STMFD etc. we must set r13 to the top end
     * of the stack memory.
     */
    set_stackptrs(0);

    /*
     * We must now clean the cache again....
     * Cleaning may be done by reading new data to displace any
     * dirty data in the cache. This will have happened in setttb()
     * but since we are boot strapping the addresses used for the read
     * may have just been remapped and thus the cache could be out
     * of sync. A re-clean after the switch will cure this.
     * After booting there are no gross relocations of the kernel thus
     * this problem will not occur after initarm().
     */
    cpu_idcache_wbinv_all();

    /*
     * Sort out bus_space for on-board devices.
     */
    pxa_obio_tag_init();

    /*
     * Fetch the SDRAM start/size from the PXA2X0 SDRAM configuration
     * registers.
     */
    pxa_probe_sdram(obio_tag, PXA2X0_MEMCTL_BASE, memstart, memsize);

    physmem = 0;
    for (i = 0; i < PXA2X0_SDRAM_BANKS; i++) {
        physmem += memsize[i] / PAGE_SIZE;
    }

    /* Fire up consoles. */
    cninit();

    /* Set stack for exception handlers */
    data_abort_handler_address = (u_int)data_abort_handler;
    prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
    undefined_handler_address = (u_int)undefinedinstruction_bounce;
    undefined_init();

    init_proc0(kernelstack.pv_va);

    /* Enable MMU, I-cache, D-cache, write buffer. */
    arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

    pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
    /*
     * ARM USE_SMALL_ALLOC uses dump_avail, so it must be filled before
     * calling pmap_bootstrap.
     */
    i = 0;
    for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) {
        if (memsize[j] > 0) {
            dump_avail[i++] = round_page(memstart[j]);
            dump_avail[i++] =
                trunc_page(memstart[j] + memsize[j]);
        }
    }
    dump_avail[i] = 0;
    vm_max_kernel_address = 0xd0000000;
    pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
    msgbufp = (void*)msgbufpv.pv_va;
    msgbufinit(msgbufp, msgbufsize);
    mutex_init();

    i = 0;
#ifdef ARM_USE_SMALL_ALLOC
    phys_avail[i++] = 0xa0000000;
    phys_avail[i++] = 0xa0001000;	/*
					 * XXX: Gross hack to get our
					 * pages in the vm_page_array.
					 */
#endif
    for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) {
        if (memsize[j] > 0) {
            phys_avail[i] = round_page(memstart[j]);
            dump_avail[i++] = round_page(memstart[j]);
            phys_avail[i] =
                trunc_page(memstart[j] + memsize[j]);
            dump_avail[i++] =
                trunc_page(memstart[j] + memsize[j]);
        }
    }

    dump_avail[i] = 0;
    phys_avail[i++] = 0;
    dump_avail[i] = 0;
    phys_avail[i] = 0;
#ifdef ARM_USE_SMALL_ALLOC
    phys_avail[2] = round_page(virtual_avail - KERNBASE + phys_avail[2]);
#else
    phys_avail[0] = round_page(virtual_avail - KERNBASE + phys_avail[0]);
#endif

    init_param2(physmem);
    kdb_init();
    return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
                     sizeof(struct pcb)));
}
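
The phys_avail/dump_avail arrays being filled above are flat lists of (start, end) physical address pairs, terminated by zero entries; a sketch of how a consumer walks that layout (addresses illustrative):

/* Sketch: walking a phys_avail-style pair list (addresses illustrative). */
#include <stdio.h>

int
main(void)
{
	/* one SDRAM bank at 0xa0000000..0xa4000000, then the terminator */
	unsigned long phys_avail[] = {
		0xa0000000UL, 0xa4000000UL,
		0, 0,
	};
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		printf("usable: 0x%lx - 0x%lx\n",
		    phys_avail[i], phys_avail[i + 1]);
	return (0);
}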
Example n. 22
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	TAILQ_INIT(&mlist);
	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
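
The segment-building loop above coalesces physically adjacent pages into a single bus_dma_segment and starts a new segment on any gap; the same logic in a freestanding sketch (page addresses illustrative):

/* Sketch of the page-coalescing logic above (illustrative addresses). */
#include <stdio.h>

#define	PAGE_SIZE	0x1000UL

int
main(void)
{
	/* physical pages as returned by the allocator (two runs) */
	unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x8000, 0x9000 };
	unsigned long start = pages[0], len = PAGE_SIZE, last = pages[0];
	size_t i;

	for (i = 1; i < sizeof(pages) / sizeof(pages[0]); i++) {
		if (pages[i] == last + PAGE_SIZE) {
			len += PAGE_SIZE;	/* extend current segment */
		} else {
			printf("seg: 0x%lx len 0x%lx\n", start, len);
			start = pages[i];	/* start a new segment */
			len = PAGE_SIZE;
		}
		last = pages[i];
	}
	printf("seg: 0x%lx len 0x%lx\n", start, len);
	return (0);
}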
Example n. 23
bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize) {

    assert(length > 0);

    if ( structSize != sizeof(TPCircularBuffer) ) {
        fprintf(stderr, "TPCircularBuffer: Header version mismatch. Check for old versions of TPCircularBuffer in your project\n");
        abort();
    }

    // Keep trying until we get our buffer, needed to handle race conditions
    int retries = 3;
    while ( true ) {

        buffer->length = (int32_t)round_page(length);    // We need whole page sizes

        // Temporarily allocate twice the length, so we have the contiguous address space to
        // support a second instance of the buffer directly after
        vm_address_t bufferAddress;
        kern_return_t result = vm_allocate(mach_task_self(),
                                           &bufferAddress,
                                           buffer->length * 2,
                                           VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Buffer allocation");
                return false;
            }
            // Try again if we fail
            continue;
        }

        // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
        result = vm_deallocate(mach_task_self(),
                               bufferAddress + buffer->length,
                               buffer->length);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Buffer deallocation");
                return false;
            }
            // If this fails somehow, deallocate the whole region and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        // Re-map the buffer to the address space immediately after the buffer
        vm_address_t virtualAddress = bufferAddress + buffer->length;
        vm_prot_t cur_prot, max_prot;
        result = vm_remap(mach_task_self(),
                          &virtualAddress,   // mirror target
                          buffer->length,    // size of mirror
                          0,                 // auto alignment
                          0,                 // force remapping to virtualAddress
                          mach_task_self(),  // same task
                          bufferAddress,     // mirror source
                          0,                 // MAP READ-WRITE, NOT COPY
                          &cur_prot,         // unused protection struct
                          &max_prot,         // unused protection struct
                          VM_INHERIT_DEFAULT);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Remap buffer memory");
                return false;
            }
            // If this remap failed, we hit a race condition, so deallocate and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        if ( virtualAddress != bufferAddress+buffer->length ) {
            // If the memory is not contiguous, clean up both allocated buffers and try again
            if ( retries-- == 0 ) {
                printf("Couldn't map buffer memory to end of buffer\n");
                return false;
            }

            vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }

        buffer->buffer = (void*)bufferAddress;
        buffer->fillCount = 0;
        buffer->head = buffer->tail = 0;
        buffer->atomic = true;

        return true;
    }
    return false;
}
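
Because the second virtual mapping mirrors the first, producers and consumers can treat the buffer as linear even across the wrap point. A usage sketch against the library's published API (TPCircularBufferHead/Produce/Tail/Consume; verify names against your copy of TPCircularBuffer.h):

// Usage sketch; assumes the TPCircularBuffer API as published
// (check your copy of TPCircularBuffer.h for exact signatures).
#include "TPCircularBuffer.h"
#include <string.h>

int main(void) {
    TPCircularBuffer buffer;
    int32_t avail;

    if ( !TPCircularBufferInit(&buffer, 16384) )
        return 1;

    // Producer side: write into the head; the mirrored mapping makes the
    // copy linear even when it crosses the physical end of the buffer.
    void *head = TPCircularBufferHead(&buffer, &avail);
    if ( head && avail >= 5 ) {
        memcpy(head, "hello", 5);
        TPCircularBufferProduce(&buffer, 5);
    }

    // Consumer side: read from the tail, then release the bytes.
    void *tail = TPCircularBufferTail(&buffer, &avail);
    if ( tail && avail > 0 )
        TPCircularBufferConsume(&buffer, avail);

    TPCircularBufferCleanup(&buffer);
    return 0;
}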
Example n. 24
/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 * 
 * No requirements
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	vm_offset_t eaddr;
	vm_size_t   esize;
	vm_size_t   align;
	int (*uksmap)(cdev_t dev, vm_page_t fake);
	struct vnode *vp;
	struct thread *td = curthread;
	struct proc *p;
	int rv = KERN_SUCCESS;
	off_t objsize;
	int docow;
	int error;

	if (size == 0)
		return (0);

	objsize = round_page(size);
	if (objsize < size)
		return (EINVAL);
	size = objsize;

	lwkt_gettoken(&map->token);
	
	/*
	 * XXX messy code, fixme
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 * will optimize it out.
	 */
	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
		esize = map->size + size;	/* workaround gcc4 opt */
		if (esize < map->size ||
		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
			lwkt_reltoken(&map->token);
			return(ENOMEM);
		}
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 *
	 * NOTE: Overflow checks require discrete statements or GCC4
	 * will optimize it out.
	 */
	if (foff & PAGE_MASK) {
		lwkt_reltoken(&map->token);
		return (EINVAL);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 *
	 * Also align any large mapping (bigger than 16x SEG_SIZE) to a
	 * SEG_SIZE address boundary.
	 */
	if (flags & MAP_SIZEALIGN) {
		align = size;
		if ((align ^ (align - 1)) != (align << 1) - 1) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
	} else if ((flags & MAP_FIXED) == 0 &&
		   ((size & SEG_MASK) == 0 || size > SEG_SIZE * 16)) {
		align = SEG_SIZE;
	} else {
		align = PAGE_SIZE;
	}

	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr)) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		eaddr = *addr + size;
		if (eaddr < *addr) {
			lwkt_reltoken(&map->token);
			return (EINVAL);
		}
		fitit = FALSE;
		if ((flags & MAP_TRYFIXED) == 0)
			vm_map_remove(map, *addr, *addr + size);
	}

	uksmap = NULL;

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle) {
			/*
			 * Default memory object
			 */
			object = default_pager_alloc(handle, objsize,
						     prot, foff);
			if (object == NULL) {
				lwkt_reltoken(&map->token);
				return(ENOMEM);
			}
			docow = MAP_PREFAULT_PARTIAL;
		} else {
			/*
			 * Implicit single instance of a default memory
			 * object, so we don't need a VM object yet.
			 */
			foff = 0;
			object = NULL;
			docow = 0;
		}
		vp = NULL;
	} else {
		vp = (struct vnode *)handle;

		/*
		 * Non-anonymous mappings of VCHR (aka not /dev/zero)
		 * cannot specify MAP_STACK or MAP_VPAGETABLE.
		 */
		if (vp->v_type == VCHR) {
			if (flags & (MAP_STACK | MAP_VPAGETABLE)) {
				lwkt_reltoken(&map->token);
				return(EINVAL);
			}
		}

		if (vp->v_type == VCHR && vp->v_rdev->si_ops->d_uksmap) {
			/*
			 * Device mappings without a VM object, typically
			 * sharing permanently allocated kernel memory or
			 * process-context-specific (per-process) data.
			 *
			 * Force them to be shared.
			 */
			uksmap = vp->v_rdev->si_ops->d_uksmap;
			object = NULL;
			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else if (vp->v_type == VCHR) {
			/*
			 * Device mappings (device size unknown?).
			 * Force them to be shared.
			 */
			error = dev_dmmap_single(vp->v_rdev, &foff, objsize,
						&object, prot, NULL);

			if (error == ENODEV) {
				handle = (void *)(intptr_t)vp->v_rdev;
				object = dev_pager_alloc(handle, objsize, prot, foff);
				if (object == NULL) {
					lwkt_reltoken(&map->token);
					return(EINVAL);
				}
			} else if (error) {
				lwkt_reltoken(&map->token);
				return(error);
			}

			docow = MAP_PREFAULT_PARTIAL;
			flags &= ~(MAP_PRIVATE|MAP_COPY);
			flags |= MAP_SHARED;
		} else {
			/*
			 * Regular file mapping (typically).  The attribute
			 * check is for the link count test only.  mmapable
			 * vnodes must already have a VM object assigned.
			 */
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat);
			if (error) {
				lwkt_reltoken(&map->token);
				return (error);
			}
			docow = MAP_PREFAULT_PARTIAL;
			object = vnode_pager_reference(vp);
			if (object == NULL && vp->v_type == VREG) {
				lwkt_reltoken(&map->token);
				kprintf("Warning: cannot mmap vnode %p, no "
					"object\n", vp);
				return(EINVAL);
			}

			/*
			 * If it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	/*
	 * Deal with the adjusted flags
	 */
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	/*
	 * This may place the area in its own page directory if (size) is
	 * large enough, otherwise it typically returns its argument.
	 *
	 * (object can be NULL)
	 */
	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	/*
	 * Stack mappings need special attention.
	 *
	 * Mappings that use virtual page tables will default to storing
	 * the page table at offset 0.
	 */
	if (uksmap) {
		rv = vm_map_find(map, uksmap, vp->v_rdev,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_UKSMAP, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	} else if (flags & MAP_STACK) {
		rv = vm_map_stack(map, *addr, size, flags,
				  prot, maxprot, docow);
	} else if (flags & MAP_VPAGETABLE) {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_VPAGETABLE, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	} else {
		rv = vm_map_find(map, object, NULL,
				 foff, addr, size,
				 align, fitit,
				 VM_MAPTYPE_NORMAL, VM_SUBSYS_MMAP,
				 prot, maxprot, docow);
	}

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference. Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 *
		 * (NOTE: object can be NULL)
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}

	/* If a process has marked all future mappings for wiring, do so */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_unwire(map, *addr, *addr + size, FALSE);

	/*
	 * Set the access time on the vnode
	 */
	if (vp != NULL)
		vn_mark_atime(vp, td);
out:
	lwkt_reltoken(&map->token);
	
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
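
The MAP_SIZEALIGN validation above, (align ^ (align - 1)) != (align << 1) - 1, is a power-of-two test: XORing x with x - 1 sets every bit up to and including the highest set bit, and that equals 2x - 1 exactly when x has a single bit set. A worked sketch:

/* Sketch: why (x ^ (x - 1)) == (x << 1) - 1 holds only for powers of two. */
#include <stdio.h>

int
main(void)
{
	unsigned long x;

	for (x = 1; x <= 12; x++) {
		/* x = 8:  8 ^ 7  = 0b1111 = 15 = (8 << 1) - 1 -> power of 2
		 * x = 12: 12 ^ 11 = 0b0111 = 7 != 23           -> not one */
		int pow2 = ((x ^ (x - 1)) == (x << 1) - 1);
		printf("%2lu %s\n", x, pow2 ? "power of two" : "-");
	}
	return (0);
}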
Example n. 25
void ppc_vm_init(unsigned int memory_size, boot_args *args)
{
	unsigned int htabmask;
	unsigned int i;
	vm_offset_t  addr;
	int boot_task_end_offset;
#if	NCPUS > 1
	const char *cpus;
#endif	/* NCPUS > 1 */

	printf("mem_size = %d M\n",memory_size / (1024 * 1024));

#ifdef __MACHO__
	/* Now retrieve addresses for end, edata, and etext 
	 * from MACH-O headers.
	 */


	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
	end = getlastaddr();
#endif

	/* Stitch valid memory regions together - they may be contiguous
	 * even though they're not already glued together
	 */

	/* Go through the list of memory regions passed in via the args
	 * and copy valid entries into the pmap_mem_regions table, adding
	 * further calculated entries.
	 */
	
	
	/* Initialise the pmap system, using space above `first_avail'*/

#ifndef	__MACHO__
	free_regions[free_regions_count].start =
	  	round_page((unsigned int)&_ExceptionVectorsEnd -
			   (unsigned int)&_ExceptionVectorsStart);
#else
	/* On MACH-O generated kernels, the Exception Vectors
	 * are already mapped and loaded at 0 -- no relocation
	 * or freeing of memory is needed
	 */

	free_regions[free_regions_count].start = round_page((unsigned int)&_ExceptionVectorsEnd) + 4096;
#endif

	/* If we are on a PDM machine, memory at 1M might be used
	 * for video.  TODO NMGS: call the video driver to handle this
	 * somehow.
	 */


	/* For PowerMac, first_avail is set to just above the bootstrap
	 * task.  TODO NMGS - different screen modes - might free mem?
	 */

	first_avail = round_page(args->first_avail);


	/* map in the exception vectors */
	/*
	 * map the kernel text, data and bss. Don't forget other regions too
	 */
	for (i = 0; i < args->kern_info.region_count; i++) {
#if	MACH_KDB
		if (args->kern_info.regions[i].prot == VM_PROT_NONE &&
		    i == args->kern_info.region_count - 1) {
			/* assume that's the kernel symbol table */
			kern_sym_start = args->kern_info.regions[i].addr;
			kern_sym_size = args->kern_info.regions[i].size;
			printf("kernel symbol table at 0x%x size 0x%x\n",
			       kern_sym_start, kern_sym_size);
			args->kern_info.regions[i].prot |=
				(VM_PROT_WRITE|VM_PROT_READ);
		}
#endif	/* MACH_KDB */

#ifdef __MACHO__
		/* Skip the VECTORS segment */
		if (args->kern_info.regions[i].addr == 0)
			continue;
#endif
		/*
		 * Map the kernel region 1:1.  (The original mapping call
		 * was truncated in this excerpt; this sketch follows the
		 * pattern of the bootstrap-task loop below.)
		 */
		(void)pmap_map(args->kern_info.regions[i].addr,
			       args->kern_info.regions[i].addr,
			       args->kern_info.regions[i].addr +
				   args->kern_info.regions[i].size,
			       args->kern_info.regions[i].prot);
	}

	boot_region_count = args->task_info.region_count;
	boot_size = 0;
	boot_task_end_offset = 0;
	/* Map bootstrap task pages 1-1 so that user_bootstrap can find it */
	for (i = 0; i < boot_region_count; i++) {
		if (args->task_info.regions[i].mapped) {
			/* kernel requires everything page aligned */
#if DEBUG
			printf("mapping virt 0x%08x to phys 0x%08x end 0x%x, prot=0x%b\n",
				 ppc_trunc_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset),
				 ppc_trunc_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset),
				 ppc_round_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset +
					args->task_info.regions[i].size),
				 args->task_info.regions[i].prot,
				 "\x10\1READ\2WRITE\3EXEC");
#endif /* DEBUG */

			(void)pmap_map(
				  ppc_trunc_page(args->task_info.base_addr + 
				      args->task_info.regions[i].offset),
			          ppc_trunc_page(args->task_info.base_addr + 
				      args->task_info.regions[i].offset),
			          ppc_round_page(args->task_info.base_addr +
				      args->task_info.regions[i].offset +
				      args->task_info.regions[i].size),
			          args->task_info.regions[i].prot);

			/* Count the size of mapped space */
			boot_size += args->task_info.regions[i].size;

			/* There may be an overlapping physical page
			 * mapped to two different virtual addresses
			 */
			if (boot_task_end_offset >
			    args->task_info.regions[i].offset) {
				boot_size -= boot_task_end_offset - 
					args->task_info.regions[i].offset;
#if DEBUG
				printf("WARNING - bootstrap overlaps regions\n");
#endif /* DEBUG */
			}

			boot_task_end_offset =
				args->task_info.regions[i].offset +
				args->task_info.regions[i].size;
		}
	}

	if (boot_region_count) {

		/* Add a new region to the bootstrap task for its stack */
		args->task_info.regions[boot_region_count].addr =
			BOOT_STACK_BASE;
		args->task_info.regions[boot_region_count].size =
			BOOT_STACK_SIZE;
		args->task_info.regions[boot_region_count].mapped = FALSE;
		boot_region_count++;
		
		boot_start        = args->task_info.base_addr;
		boot_region_desc  = (vm_offset_t) args->task_info.regions;
		/* TODO NMGS need to put param info onto top of boot stack */
		boot_task_thread_state.r1   = BOOT_STACK_PTR-0x100;
		boot_task_thread_state.srr0 = args->task_info.entry;
		boot_task_thread_state.srr1 =
			MSR_MARK_SYSCALL(MSR_EXPORT_MASK_SET);
		
		boot_thread_state_flavor = PPC_THREAD_STATE;
		boot_thread_state_count  = PPC_THREAD_STATE_COUNT;
		boot_thread_state        =
			(thread_state_t)&boot_task_thread_state;
	}



}
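
ppc_vm_init() leans heavily on round_page()/trunc_page(), which are plain power-of-two arithmetic on the page size. A hedged userland model of the same macros follows; the 4 KB PAGE_SIZE is an assumption for illustration, and the kernel's <machine/param.h> definitions remain authoritative.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	unsigned long addr = 0x12345;

	printf("trunc_page(0x%lx) = 0x%lx\n", addr, trunc_page(addr));
	printf("round_page(0x%lx) = 0x%lx\n", addr, round_page(addr));
	return 0;
}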
Example n. 26
0
/* 
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *		long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 *
 * No requirements
 */
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (size == 0)
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Get a hint of where to map. It also provides mmap offset
		 * randomization if enabled.
		 */
		addr = vm_map_hint(p, addr, prot);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch(vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.  
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	lwkt_gettoken(&vms->vm_map.token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap && 
	    vms->vm_map.nentries >= max_proc_mmap * vmspace_getrefs(vms)) {
		error = ENOMEM;
		lwkt_reltoken(&vms->vm_map.token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vms->vm_map.token);
done:
	if (fp)
		fdrop(fp);

	return (error);
}
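
The offset/length rounding in kern_mmap() is easy to get wrong, so here is a standalone model of just that arithmetic: split the byte offset into an aligned part plus pageoff, grow the length at both ends, and reject wrap. The variable names mirror the kernel code, but this is only an illustration, not the kernel function.

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned long page_mask = (unsigned long)sysconf(_SC_PAGESIZE) - 1;
	unsigned long pos = 10000;		/* file offset, not aligned */
	unsigned long len = 3000;		/* requested length */
	unsigned long pageoff, size;

	pageoff = pos & page_mask;
	pos -= pageoff;				/* align the offset down */
	size = len + pageoff;			/* low end... */
	size = (size + page_mask) & ~page_mask;	/* hi end */
	if (size < len) {			/* wrap */
		fprintf(stderr, "EINVAL: size wrapped\n");
		return 1;
	}
	printf("pos=%lu pageoff=%lu size=%lu\n", pos, pageoff, size);
	return 0;
}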
Example n. 27
0
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
		  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = 0;
	size_t copy_len;

	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file,
		 * which means it has zero-filled areas, and
		 * we have to work for it.  Stupid iBCS!
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we
		 * don't care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d):  vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
		"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
		__FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
	        map_offset));

	if ((error = vm_mmap(&vmspace->vm_map,
			     &map_addr,
			     map_len,
			     prot,
			     VM_PROT_ALL,
			     MAP_PRIVATE | MAP_FIXED,
			     OBJT_VNODE,
			     vp,
			     map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accommodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actually map in the file last.
	 */

	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx, "
	    "0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
	    __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

	if (map_len != 0) {
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
		    map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return (vm_mmap_to_errno(error));
	}

	if ((error = vm_mmap(exec_map,
			    (vm_offset_t *) &data_buf,
			    PAGE_SIZE,
			    VM_PROT_READ,
			    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	if (vm_map_remove(exec_map,
			  (vm_offset_t) data_buf,
			  (vm_offset_t) data_buf + PAGE_SIZE))
		panic("load_coff_section vm_map_remove failed");

	return error;
}
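
The memsz > filsz case that load_coff_section() handles is the classic zero-fill (bss) problem every segment loader faces: the last file-backed page carries stale file bytes past filsz that must be cleared by hand. A hedged userland sketch of that single step; zero_segment_tail() and its parameters are hypothetical, not part of the COFF loader above.

#include <stddef.h>
#include <string.h>

/* Zero the stale bytes between the end of the file-backed data
 * (filsz) and the end of its last page.  Pages past round_page(filsz)
 * are assumed to come from an anonymous zero-fill mapping and need
 * no work.
 */
static void
zero_segment_tail(unsigned char *base, size_t filsz, size_t memsz,
    size_t pagesz)
{
	size_t off;

	if (memsz <= filsz)
		return;			/* no bss, nothing to clear */
	off = filsz & (pagesz - 1);
	if (off != 0)
		memset(base + filsz, 0, pagesz - off);
}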
Example n. 28
0
/*
 * msync system call handler 
 *
 * msync_args(void *addr, size_t len, int flags)
 *
 * No requirements
 */
int
sys_msync(struct msync_args *uap)
{
	struct proc *p = curproc;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (size < uap->len)		/* wrap */
		return(EINVAL);
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr)		/* wrap */
		return(EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * map->token serializes extracting the address range for size == 0
	 * msyncs with the vm_map_clean call; if the token were not held
	 * across the two calls, an intervening munmap/mmap pair, for example,
	 * could cause msync to occur on a wrong region.
	 */
	lwkt_gettoken(&map->token);

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr. This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		if (rv == FALSE) {
			vm_map_unlock_read(map);
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		addr = entry->start;
		size = entry->end - entry->start;
		vm_map_unlock_read(map);
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
			  (flags & MS_INVALIDATE) != 0);
done:
	lwkt_reltoken(&map->token);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}
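
From userland, the contract sys_msync() enforces looks like this. A minimal sketch, assuming a writable file "data.tmp" of at least one page already exists; error handling is trimmed for brevity.

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("data.tmp", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	p[0] = 'x';			/* dirty the page */
	msync(p, 4096, MS_SYNC);	/* synchronous writeback */
	/* MS_ASYNC | MS_INVALIDATE together would fail with EINVAL */
	munmap(p, 4096);
	close(fd);
	return 0;
}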
Example n. 29
0
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_long c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			o = v & PAGE_MASK;
			c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
			error = uiomove((void *)PHYS_TO_DMAP(v), (int)c, uio);
			continue;
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
				v = DMAP_TO_PHYS(v);
				goto kmemphys;
			}

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);

			if (addr < VM_MIN_KERNEL_ADDRESS)
				return (EFAULT);
			for (; addr < eaddr; addr += PAGE_SIZE) 
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);

			if (!kernacc((caddr_t)(long)v, c,
			    uio->uio_rw == UIO_READ ? 
			    VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);

			error = uiomove((caddr_t)(long)v, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
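
The uio_offset that memrw() consumes is simply the file offset on the character device, so a userland read of physical memory reduces to open() plus pread(). A hedged sketch: it requires root and a kernel that exposes /dev/mem, and PHYS_ADDR is a placeholder, not a known-good address.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define PHYS_ADDR	0x1000UL	/* hypothetical physical address */

int
main(void)
{
	char buf[4096];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* The offset passed here becomes uio_offset in memrw(). */
	if (pread(fd, buf, sizeof(buf), PHYS_ADDR) < 0)
		perror("pread");
	close(fd);
	return 0;
}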
Example n. 30
0
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > PAGE_SIZE)
		panic("USPACE too small for user");
#endif

	/* Flush the parent process out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	pcbp = &l2->l_addr->u_pcb;
	bcopy(&l1->l_addr->u_pcb, pcbp, sizeof(*pcbp));

	sp = (register_t)l2->l_addr + PAGE_SIZE;
	l2->l_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(l1->l_md.md_regs, tf, sizeof(*tf));

	/*
	 * cpu_swapin() is supposed to fill out all the PAs
	 * we are going to need in locore.
	 */
	cpu_swapin(l2);

	/* Activate this process' pmap. */
	pmap_activate(l2);

	/*
	 * Theoretically these could be inherited from the parent,
	 * but set them just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */;
	pcbp->pcb_fpregs[HPPA_NFPREGS] = 0;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build a stack frame for the cpu_switch & co.
	 */
	osp = sp;
	sp += HPPA_FRAME_SIZE + 16*4; /* std frame + callee-save registers */
	*HPPA_FRAME_CARG(0, sp) = tf->tf_sp;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(2, sp) = (register_t)arg;
	*(register_t*)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t*)(sp + HPPA_FRAME_CRP) =
		(register_t)switch_trampoline;
	tf->tf_sp = sp;
	fdcache(HPPA_SID_KERNEL, (vaddr_t)l2->l_addr, sp - (vaddr_t)l2->l_addr);
}
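
The tf_ret0/tf_ret1 setup above is the kernel half of the fork() return convention: the child receives the parent's pid plus an "ischild" flag, and the libc stub folds that into the familiar 0-in-child result. A userland view of the finished convention:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		printf("child: fork() returned 0\n");
		_exit(0);
	}
	printf("parent: fork() returned child pid %d\n", (int)pid);
	wait(NULL);
	return 0;
}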