Example #1
static int
xboxfb_init(struct vt_device *vd)
{
	struct fb_info *info;
	int i;

	if (!arch_i386_is_xbox)
		return (CN_DEAD);

	info = &xboxfb_info;
	/*
	 * We must map the video framebuffer memory into the kernel
	 * address space. This is very crude: we map the entire video
	 * memory starting at virtual address PAGE_SIZE! Since our
	 * kernel lives at its relocated address range (0xc0xxxxxx),
	 * it won't care.
	 *
	 * We use address PAGE_SIZE and up so we can still trap
	 * NULL pointers.  Once the real init is called, the
	 * mapping will be done via the OS and stored in a more
	 * sensible location ... but since we're not fully
	 * initialized, this is our only way to go :-(
	 */
	for (i = 0; i < (XBOX_FB_SIZE / PAGE_SIZE); i++) {
		pmap_kenter(((i + 1) * PAGE_SIZE),
		    XBOX_FB_START + (i * PAGE_SIZE));
	}
	pmap_kenter((i + 1) * PAGE_SIZE,
	    XBOX_FB_START_PTR - XBOX_FB_START_PTR % PAGE_SIZE);

	/* Ensure the framebuffer is where we want it to be. */
	*(uint32_t *)((i + 1) * PAGE_SIZE + XBOX_FB_START_PTR % PAGE_SIZE) =
	    XBOX_FB_START;

	/* Initialize fb_info. */
	info = vd->vd_softc;

	info->fb_width = VT_XBOX_WIDTH;
	info->fb_height = VT_XBOX_HEIGHT;

	info->fb_size = XBOX_FB_SIZE;
	info->fb_stride = VT_XBOX_WIDTH * 4; /* 32bits per pixel. */

	info->fb_vbase = PAGE_SIZE;
	info->fb_pbase = XBOX_FB_START_PTR;

	/* Get pixel storage size. */
	info->fb_bpp = 32;
	/* Get color depth. */
	info->fb_depth = 24;

	vt_generate_vga_palette(info->fb_cmap, COLOR_FORMAT_RGB, 255, 0, 255,
	    8, 255, 16);
	fb_probe(info);
	vt_fb_init(vd);

	return (CN_INTERNAL);
}
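The early mapping above places the framebuffer at low kernel virtual addresses, starting at PAGE_SIZE. As an illustration only (the helper below is hypothetical and not part of the driver), a byte at a given offset into the framebuffer would be reachable at:
/*
 * Illustration only: under the crude mapping set up above, framebuffer
 * physical page i is wired at virtual address (i + 1) * PAGE_SIZE, so a
 * byte 'off' bytes into the framebuffer lives at PAGE_SIZE + off.
 */
static inline uint8_t *
xboxfb_early_va(size_t off)
{

	return ((uint8_t *)(uintptr_t)(PAGE_SIZE + off));
}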
Example #2
int
s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
               int flag, bus_space_handle_t * bshp)
{
    u_long startpa, endpa, pa;
    vm_offset_t va;
    pt_entry_t *pte;
    const struct pmap_devmap *pd;

    if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (bpa - pd->pd_pa);
        return 0;
    }

    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    va = kmem_alloc_nofault(kernel_map, endpa - startpa);
    if (!va)
        return (ENOMEM);

    *bshp = (bus_space_handle_t) (va + (bpa - startpa));

    for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
        pmap_kenter(va, pa);
        pte = vtopte(va);
        if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0)
            *pte &= ~L2_S_CACHE_MASK;
    }
    return (0);
}
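The per-page loop above is the pattern shared by most of these examples: truncate and round the physical range to page boundaries, then wire one page at a time with pmap_kenter(). A distilled sketch of just that loop, built only from the macros and the function already used here (not code from any of the drivers):
/*
 * Sketch only: wire the physical range [bpa, bpa + size) at virtual
 * address va, one page at a time. Assumes va came from a KVA allocator,
 * as in the examples above and below; cacheability is left untouched.
 */
static void
kenter_range(vm_offset_t va, bus_addr_t bpa, bus_size_t size)
{
	u_long pa, endpa;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);
	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa);
}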
Example #3
static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct xen_add_to_physmap xatp;
	unsigned int i = end_idx;

	/*
	 * Loop backwards, so that the first hypercall has the largest index,
	 * ensuring that the table will grow only once.
	 */
	do {
		xatp.domid = DOMID_SELF;
		xatp.idx = i;
		xatp.space = XENMAPSPACE_grant_table;
		xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
			panic("HYPERVISOR_memory_op failed to map gnttab");
	} while (i-- > start_idx);

	if (shared == NULL) {
		vm_offset_t area;

		area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
		KASSERT(area, ("can't allocate VM space for grant table"));
		shared = (grant_entry_t *)area;
	}

	for (i = start_idx; i <= end_idx; i++) {
		pmap_kenter((vm_offset_t) shared + i * PAGE_SIZE,
		    resume_frames + i * PAGE_SIZE);
	}

	return (0);
}
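The do/while above visits end_idx, end_idx - 1, ..., start_idx inclusive, so the very first hypercall carries the largest index and the grant table grows only once. A standalone user-space illustration of those loop bounds (not kernel code):
#include <stdio.h>

/* Prints 5 4 3 2: the same bounds as the do/while loop above. */
int
main(void)
{
	unsigned int start_idx = 2, end_idx = 5;
	unsigned int i = end_idx;

	do {
		printf("%u\n", i);
	} while (i-- > start_idx);
	return (0);
}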
Example #4
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    void *va;
    vm_page_t m;
    int pflags;

    *flags = UMA_SLAB_PRIV;
    pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

    for (;;) {
        m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
        if (m == NULL) {
            if (wait & M_NOWAIT)
                return (NULL);
            VM_WAIT;
        } else
            break;
    }

    va = (void *) VM_PAGE_TO_PHYS(m);

    if (!hw_direct_map)
        pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

    if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
        bzero(va, PAGE_SIZE);
    atomic_add_int(&hw_uma_mdpages, 1);

    return (va);
}
Example #5
static          vm_offset_t
dpt_physmap(u_int32_t req_paddr, vm_size_t req_size)
{
	vm_offset_t     va;
	int             ndx;
	vm_size_t       size;
	u_int32_t       paddr;
	u_int32_t       offset;

	size = (req_size / PAGE_SIZE + 1) * PAGE_SIZE;
	paddr = req_paddr & 0xfffff000;
	offset = req_paddr - paddr;

	va = kmem_alloc_pageable(kernel_map, size);
	if (va == (vm_offset_t) 0)
		return (va);

	for (ndx = 0; ndx < size; ndx += PAGE_SIZE) {
		pmap_kenter(va + ndx, paddr + ndx);
		invltlb();
	}

	return (va + offset);
}
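The size computed above, (req_size / PAGE_SIZE + 1) * PAGE_SIZE, ignores the sub-page offset of req_paddr and can come up one page short when the offset and the size's remainder together exceed PAGE_SIZE. A defensive form of the same computation, sketched with the trunc_page()/round_page() macros used in the other examples (not code from the dpt driver):
/*
 * Sketch only: bytes of page-aligned mapping needed so that the window
 * starting at trunc_page(req_paddr) covers offset + req_size bytes.
 */
static vm_size_t
dpt_map_size(u_int32_t req_paddr, vm_size_t req_size)
{

	return (round_page(req_paddr + req_size) - trunc_page(req_paddr));
}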
Example #6
static void
ofw_real_bounce_alloc(void *junk)
{
	/*
	 * Check that ofw_real is actually in use before allocating wads 
	 * of memory. Do this by checking if our mutex has been set up.
	 */
	if (!mtx_initialized(&of_bounce_mtx))
		return;

	/*
	 * Allocate contiguous, wired physical memory that fits into a
	 * 32-bit address space and can be accessed from real mode.
	 */

	mtx_lock(&of_bounce_mtx);

	of_bounce_virt = contigmalloc(4 * PAGE_SIZE, M_OFWREAL, 0, 0,
	    ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT), PAGE_SIZE,
	    4 * PAGE_SIZE);

	of_bounce_phys = vtophys(of_bounce_virt);
	of_bounce_size = 4 * PAGE_SIZE;

	/*
	 * For virtual-mode OF, direct map this physical address so that
	 * we have a 32-bit virtual address to give OF.
	 */

	if (!ofw_real_mode && !hw_direct_map) 
		pmap_kenter(of_bounce_phys, of_bounce_phys);

	mtx_unlock(&of_bounce_mtx);
}
Example #7
/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
#ifdef ARM_USE_SMALL_ALLOC
	return ((struct sf_buf *)m);
#else
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep. 
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_kenter(sf->kva, VM_PAGE_TO_PHYS(sf->m));
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
#endif
}
Example #8
/*
 * Attach - find resources and talk to Xen.
 */
static int
xenpci_attach(device_t dev)
{
	int error;
	struct xenpci_softc *scp = device_get_softc(dev);
	struct xen_add_to_physmap xatp;
	vm_offset_t shared_va;

	error = xenpci_allocate_resources(dev);
	if (error)
		goto errexit;

	scp->phys_next = rman_get_start(scp->res_memory);

	error = xenpci_init_hypercall_stubs(dev, scp);
	if (error)
		goto errexit;

	setup_xen_features();

	xenpci_alloc_space_int(scp, PAGE_SIZE, &shared_info_pa); 

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = shared_info_pa >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");

	shared_va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	pmap_kenter(shared_va, shared_info_pa);
	HYPERVISOR_shared_info = (void *) shared_va;

	/*
	 * Hook the irq up to evtchn
	 */
	xenpci_irq_init(dev, scp);
	xenpci_set_callback(dev);

	return (bus_generic_attach(dev));

errexit:
	/*
	 * Undo anything we may have done.
	 */
	xenpci_deallocate_resources(dev);
	return (error);
}
Example #9
void
bs_remap_earlyboot(void)
{
	int i;
	vm_offset_t pa, spa;

	if (hw_direct_map)
		return;

	for (i = 0; i < earlyboot_map_idx; i++) {
		spa = earlyboot_mappings[i].addr;

		pa = trunc_page(spa);
		while (pa < spa + earlyboot_mappings[i].size) {
			pmap_kenter(pa, pa);
			pa += PAGE_SIZE;
		}
	}
}
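bs_remap_earlyboot() re-wires, 1:1, ranges that were recorded before the pmap layer came up. A hypothetical sketch of how such a range might be recorded into the earlyboot_mappings[] / earlyboot_map_idx bookkeeping referenced above (bounds checking omitted; not code from the platform layer):
/*
 * Hypothetical helper: remember an early-boot range so that
 * bs_remap_earlyboot() can map it 1:1 once pmap_kenter() is usable.
 */
static void
earlyboot_record(vm_offset_t addr, vm_size_t size)
{

	earlyboot_mappings[earlyboot_map_idx].addr = addr;
	earlyboot_mappings[earlyboot_map_idx].size = size;
	earlyboot_map_idx++;
}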
Example #10
/*
 * Map some memory using the crashdump map.  'offset' is an offset in
 * pages into the crashdump map to use for the start of the mapping.
 */
static void *
table_map(vm_paddr_t pa, int offset, vm_offset_t length)
{
	vm_offset_t va, off;
	void *data;

	off = pa & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	pa = pa & PG_FRAME;
	va = (vm_offset_t)pmap_kenter_temporary(pa, offset) +
	    (offset * PAGE_SIZE);
	data = (void *)(va + off);
	length -= PAGE_SIZE;
	while (length > 0) {
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		length -= PAGE_SIZE;
		pmap_kenter(va, pa);
		invlpg(va);
	}
	return (data);
}
Example #11
/*
 * Called very early in the resume sequence - reinitialise the various
 * bits of Xen machinery including the hypercall page and the shared
 * info page.
 */
void
xenpci_resume(void)
{
	device_t dev = devclass_get_device(xenpci_devclass, 0);
	struct xenpci_softc *scp = device_get_softc(dev);
	struct xen_add_to_physmap xatp;

	xenpci_resume_hypercall_stubs(dev, scp);

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = shared_info_pa >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");

	pmap_kenter((vm_offset_t) HYPERVISOR_shared_info, shared_info_pa);

	xenpci_set_callback(dev);

	gnttab_resume();
	irq_resume();
}
Example #12
int
generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	vm_paddr_t startpa, endpa, pa, offset;
	vm_offset_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	va = kmem_alloc(kernel_map, endpa - startpa);
	if (va == 0)
		return (ENOMEM);

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter(va, pa);
		pte = vtopte(va);
		if (!(flags & BUS_SPACE_MAP_CACHEABLE)) {
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}

	return (0);
}
Example #13
static void
cpu_initialize_context(unsigned int cpu)
{
	/*
	 * vcpu_guest_context_t is too large to allocate on the stack,
	 * so we allocate it statically and protect it with a lock.
	 */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	int i;

	/*
	 * Pages 0-3:	PTD
	 * Page 4:	boot stack
	 * Page 5:	PDPT
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);
	}
	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;

#ifdef PAE	
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
	}
#endif	

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);
	
	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));

	xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
	rw_wlock(&pvh_global_lock);
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;
		
		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))), 
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	rw_wunlock(&pvh_global_lock);
	
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] = (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents      = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs     = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip    = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs  = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip  = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}
Example #14
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0;
	vm_paddr_t pa;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr;

	/* XXX UPS Why ? */
	GIANT_REQUIRED;

	if (dev2unit(dev) != CDEV_MINOR_MEM && dev2unit(dev) != CDEV_MINOR_KMEM)
		return EIO;

	if (dev2unit(dev) == CDEV_MINOR_KMEM && uio->uio_resid > 0) {
		if (uio->uio_offset < (vm_offset_t)VADDR(PTDPTDI, 0))
			return (EFAULT);

		if (!kernacc((caddr_t)(int)uio->uio_offset, uio->uio_resid,
		    uio->uio_rw == UIO_READ ?  VM_PROT_READ : VM_PROT_WRITE))
			return (EFAULT);
	}

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset;
			pa &= ~PAGE_MASK;
		} else {
			/*
			 * Extract the physical page since the mapping may
			 * change at any time. This avoids panics on page 
			 * fault in this case but will cause reading/writing
			 * to the wrong page.
			 * Hopefully an application will notice the wrong
			 * data on read access and refrain from writing.
			 * This should be replaced by a special uiomove
			 * type function that just returns an error if there
			 * is a page fault on a kernel page. 
			 */
			addr = trunc_page(uio->uio_offset);
			pa = pmap_extract(kernel_pmap, addr);
			if (pa == 0) 
				return EFAULT;

		}
		
		/* 
		 * XXX UPS This should just use sf_buf_alloc.
		 * Unfortunately sf_buf_alloc needs a vm_page
		 * and we may want to look at memory not covered
		 * by the page array.
		 */

		sx_xlock(&memsxlock);
		pmap_kenter((vm_offset_t)ptvmmap, pa);
		pmap_invalidate_page(kernel_pmap,(vm_offset_t)ptvmmap);

		o = (int)uio->uio_offset & PAGE_MASK;
		c = PAGE_SIZE - o;
		c = min(c, (u_int)iov->iov_len);
		error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
		pmap_qremove((vm_offset_t)ptvmmap, 1);
		sx_xunlock(&memsxlock);
		
	}

	return (error);
}
Example #15
/*ARGSUSED*/
static int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	int o;
	u_long c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_long)(PAGE_SIZE - ((long)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_qremove((vm_offset_t)ptvmmap, 1);
			continue;

/* minor device 1 is kernel memory */
		case 1:
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently resident so
			 * that we don't create any zero-fill pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			if (addr < (vm_offset_t)KERNBASE)
				return (EFAULT);
			for (; addr < eaddr; addr += PAGE_SIZE) 
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);

			if (!kernacc((caddr_t)(long)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? 
			    VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(long)uio->uio_offset, (int)c, uio);
			continue;

		default:
			return (ENODEV);
		}

		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	return (error);
}
Example #16
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem 
			 */
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem 
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently 
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return EFAULT;

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is source of nulls 
			 * on read, write are disallowed.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}
Example #17
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			int i;
			int address_valid = 0;

			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			for (i = 0; dump_avail[i] || dump_avail[i + 1];
			    i += 2) {
				if (v >= dump_avail[i] &&
				    v < dump_avail[i + 1]) {
					address_valid = 1;
					break;
				}
			}
			if (!address_valid)
				return (EINVAL);
			sx_xlock(&tmppt_lock);
			pmap_kenter((vm_offset_t)_tmppt, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&_tmppt[o], (int)c, uio);
			pmap_qremove((vm_offset_t)_tmppt, 1);
			sx_xunlock(&tmppt_lock);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}