Code example #1
File: via_dmablit.c Project: JabirTech/Source
/*
 * Obtain a page pointer array and lock all pages into system memory.
 * The call fails with EACCES if the calling user does not have access
 * to the submitted address.
 */
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vm_page_t m;
	int i;

	vsg->num_pages = VIA_PFN(xfer->mem_addr +
	    (xfer->num_lines * xfer->mem_stride - 1)) - first_pfn + 1;

	if (NULL == (vsg->pages = malloc(sizeof(vm_page_t) * vsg->num_pages,
	    DRM_MEM_DRIVER, M_NOWAIT)))
		return -ENOMEM;

	vsg->state = dr_via_pages_alloc;

	if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)xfer->mem_addr, vsg->num_pages * PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE, vsg->pages, vsg->num_pages) < 0)
		return -EACCES;

	for (i = 0; i < vsg->num_pages; i++) {
		m = vsg->pages[i];
		vm_page_lock(m);
		vm_page_wire(m);
		vm_page_unhold(m);
		vm_page_unlock(m);
	}
	vsg->state = dr_via_pages_locked;

	DRM_DEBUG("DMA pages locked\n");

	return 0;
}
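
The wire-then-unhold loop above converts the transient holds taken by vm_fault_quick_hold_pages() into wirings that keep the pages resident for the lifetime of the blit. Below is a minimal sketch of the matching release; the helper name is hypothetical, not the driver's actual cleanup routine.

/*
 * Minimal sketch (hypothetical helper): unwire every page wired in
 * via_lock_all_dma_pages() and free the page-pointer array.
 */
static void
via_unwire_dma_pages(drm_via_sg_info_t *vsg)
{
	vm_page_t m;
	int i;

	for (i = 0; i < vsg->num_pages; i++) {
		m = vsg->pages[i];
		vm_page_lock(m);
		vm_page_unwire(m, 0);	/* drop the wiring taken above */
		vm_page_unlock(m);
	}
	free(vsg->pages, DRM_MEM_DRIVER);
}
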
Code example #2
/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	vm_page_lock(pp);
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}
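
zbuf_page_free() is used on the sf_buf allocation failure path but is not shown here. A sketch of what it plausibly looks like, mirroring the wire/unhold pattern above (an assumption based on FreeBSD's zero-copy BPF code, not the verbatim function):

/*
 * Assumed sketch of zbuf_page_free(): drop the wiring taken in
 * zbuf_sfbuf_get(), and free the page if that was the last wiring
 * and the page has lost its object.
 */
static void
zbuf_page_free(vm_page_t pp)
{
	vm_page_lock(pp);
	vm_page_unwire(pp, 0);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}
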
Code example #3
File: vmm.c Project: vinceguogit/freebsd
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}
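
Unlike the earlier examples, vm_gpa_hold() leaves the page merely held rather than wired, and returns it through *cookie. A sketch of the matching release under that convention (bhyve's vm_gpa_release() has this shape):

/*
 * Sketch of the matching release: the cookie stored by vm_gpa_hold()
 * is the held vm_page_t, so releasing the mapping is just an unhold.
 */
void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
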
Code example #4
File: t4_ddp.c Project: cyrilmagsuci/freebsd
static int
hold_uio(struct uio *uio, vm_page_t **ppages, int *pnpages)
{
	struct vm_map *map;
	struct iovec *iov;
	vm_offset_t start, end;
	vm_page_t *pp;
	int n;

	KASSERT(uio->uio_iovcnt == 1,
	    ("%s: uio_iovcnt %d", __func__, uio->uio_iovcnt));
	KASSERT(uio->uio_td->td_proc == curproc,
	    ("%s: uio proc (%p) is not curproc (%p)",
	    __func__, uio->uio_td->td_proc, curproc));

	map = &curproc->p_vmspace->vm_map;
	iov = &uio->uio_iov[0];
	start = trunc_page((uintptr_t)iov->iov_base);
	end = round_page((vm_offset_t)iov->iov_base + iov->iov_len);
	n = howmany(end - start, PAGE_SIZE);

	if (end - start > MAX_DDP_BUFFER_SIZE)
		return (E2BIG);

	pp = malloc(n * sizeof(vm_page_t), M_CXGBE, M_NOWAIT);
	if (pp == NULL)
		return (ENOMEM);

	if (vm_fault_quick_hold_pages(map, (vm_offset_t)iov->iov_base,
	    iov->iov_len, VM_PROT_WRITE, pp, n) < 0) {
		free(pp, M_CXGBE);
		return (EFAULT);
	}

	*ppages = pp;
	*pnpages = n;

	return (0);
}
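
hold_uio() leaves the pages held for the duration of the DDP transfer, so the caller must release them afterwards. An illustrative caller (hypothetical, not taken from t4_ddp.c):

/*
 * Hypothetical caller: hold the pages backing a uio, perform the
 * transfer, then release the holds and free the array.
 */
static int
ddp_hold_example(struct uio *uio)
{
	vm_page_t *pages;
	int error, npages;

	error = hold_uio(uio, &pages, &npages);
	if (error != 0)
		return (error);

	/* ... program the DDP buffer with 'pages' and await completion ... */

	vm_page_unhold_pages(pages, npages);
	free(pages, M_CXGBE);
	return (0);
}
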
Code example #5
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
	struct proc *p, PAGELIST_T ** ppagelist)
{
	PAGELIST_T *pagelist;
	vm_page_t* pages;
	unsigned long *addrs;
	unsigned int num_pages, offset, i;
	int pagelist_size;
	char *addr, *base_addr, *next_addr;
	int run, addridx, actual_pages;

	offset = (unsigned int)buf & (PAGE_SIZE - 1);
	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	*ppagelist = NULL;

	/* Allocate enough storage to hold the page pointers and the page
	** list
	*/
	pagelist_size = sizeof(PAGELIST_T) +
		(num_pages * sizeof(unsigned long)) +
		(num_pages * sizeof(vm_page_t));
	pagelist = malloc(pagelist_size, M_VCPAGELIST, M_WAITOK | M_ZERO);

	vchiq_log_trace(vchiq_arm_log_level,
		"create_pagelist - %x", (unsigned int)pagelist);
	if (!pagelist)
		return -ENOMEM;

	addrs = pagelist->addrs;
	pages = (vm_page_t*)(addrs + num_pages);

	actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
	    (vm_offset_t)buf, count,
	    (type == PAGELIST_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, pages, num_pages);

	if (actual_pages != num_pages) {
		vm_page_unhold_pages(pages, actual_pages);
		free(pagelist, M_VCPAGELIST);
		return (-ENOMEM);
	}

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Group the pages into runs of contiguous pages */

	base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
	next_addr = base_addr + PAGE_SIZE;
	addridx = 0;
	run = 0;

	for (i = 1; i < num_pages; i++) {
		addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
			next_addr += PAGE_SIZE;
			run++;
		} else {
			addrs[addridx] = (unsigned long)base_addr + run;
			addridx++;
			base_addr = addr;
			next_addr = addr + PAGE_SIZE;
			run = 0;
		}
	}

	addrs[addridx] = (unsigned long)base_addr + run;
	addridx++;

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
		((pagelist->offset + pagelist->length) &
		(CACHE_LINE_SIZE - 1)))) {
		FRAGMENTS_T *fragments;

		if (down_interruptible(&g_free_fragments_sema) != 0) {
			vm_page_unhold_pages(pages, num_pages);
			free(pagelist, M_VCPAGELIST);
			return -EINTR;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = (FRAGMENTS_T *) g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type =
			 PAGELIST_READ_WITH_FRAGMENTS + (fragments -
							 g_fragments_base);
	}

	cpu_dcache_wbinv_range((vm_offset_t)pagelist, pagelist_size);
	*ppagelist = pagelist;

	return 0;
}
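
Because each page's bus address is page-aligned, the low bits of every addrs[] entry are free, and the loop above packs the run length (the number of additional contiguous pages) into them. A hypothetical decoder, just to make the encoding concrete:

/*
 * Hypothetical decoder for the packed entries built above: the high
 * bits carry the page-aligned bus address of the first page in a run,
 * the low bits the count of additional contiguous pages.
 */
static void
print_pagelist_runs(const unsigned long *addrs, int naddrs)
{
	unsigned long base, extra;
	int i;

	for (i = 0; i < naddrs; i++) {
		base = addrs[i] & ~(unsigned long)(PAGE_SIZE - 1);
		extra = addrs[i] & (PAGE_SIZE - 1);
		printf("run %d: bus 0x%lx, %lu page(s)\n", i, base, extra + 1);
	}
}
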
Code example #6
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;
	struct bio *bp;
	struct vm_page **pages;
	caddr_t sa;
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > MAXPHYS)
			uprintf("%s: request size=%zd > MAXPHYS=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, MAXPHYS);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		sa = pbuf->b_data;
		maxpages = btoc(MAXPHYS);
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			bp->bio_data = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > MAXPHYS)
				bp->bio_length = MAXPHYS;

			/*
			 * Make sure the pbuf can map the request.
			 * The pbuf has kvasize = MAXPHYS, so a request
			 * larger than MAXPHYS - PAGE_SIZE must be
			 * page aligned or it will be fragmented.
			 */
			poff = (vm_offset_t)bp->bio_data & PAGE_MASK;
			if (pbuf && bp->bio_length + poff > pbuf->b_kvasize) {
				if (dev->si_flags & SI_NOSPLIT) {
					uprintf("%s: request ptr %p is not "
					    "on a page boundary; cannot split "
					    "request\n", devtoname(dev),
					    bp->bio_data);
					error = EFBIG;
					goto doerror;
				}
				bp->bio_length = pbuf->b_kvasize;
				if (poff != 0)
					bp->bio_length -= PAGE_SIZE;
			}

			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)bp->bio_data, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				if (pbuf && sa) {
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			}

			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}
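
Drivers normally reach physio() through the physread()/physwrite() wrappers installed in their cdevsw; physio() holds the user pages and hands the resulting bios to the driver's strategy routine. A minimal sketch with a hypothetical device:

/* Hypothetical device; the strategy routine consumes the bios physio() builds. */
static d_strategy_t mydev_strategy;

static struct cdevsw mydev_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"mydev",
	.d_read =	physread,	/* thin wrapper around physio() */
	.d_write =	physwrite,	/* likewise */
	.d_strategy =	mydev_strategy,
};
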
Code example #7
File: uipc_cow.c Project: FelixHaller/libuinet
int
socow_setup(struct mbuf *m0, struct uio *uio)
{
    struct sf_buf *sf;
    vm_page_t pp;
    struct iovec *iov;
    struct vmspace *vmspace;
    struct vm_map *map;
    vm_offset_t offset, uva;
    vm_size_t len;

    socow_stats.attempted++;
    vmspace = curproc->p_vmspace;
    map = &vmspace->vm_map;
    uva = (vm_offset_t) uio->uio_iov->iov_base;
    offset = uva & PAGE_MASK;
    len = PAGE_SIZE - offset;

    /*
     * Verify that access to the given address is allowed from user-space.
     */
    if (vm_fault_quick_hold_pages(map, uva, len, VM_PROT_READ, &pp, 1) <
            0) {
        socow_stats.fail_not_mapped++;
        return(0);
    }

    /*
     * set up COW
     */
    vm_page_lock(pp);
    if (vm_page_cowsetup(pp) != 0) {
        vm_page_unhold(pp);
        vm_page_unlock(pp);
        return (0);
    }

    /*
     * wire the page for I/O
     */
    vm_page_wire(pp);
    vm_page_unhold(pp);
    vm_page_unlock(pp);
    /*
     * Allocate an sf buf
     */
    sf = sf_buf_alloc(pp, SFB_CATCH);
    if (sf == NULL) {
        vm_page_lock(pp);
        vm_page_cowclear(pp);
        vm_page_unwire(pp, 0);
        /*
         * Check for the object going away on us. This can
         * happen since we don't hold a reference to it.
         * If so, we're responsible for freeing the page.
         */
        if (pp->wire_count == 0 && pp->object == NULL)
            vm_page_free(pp);
        vm_page_unlock(pp);
        socow_stats.fail_sf_buf++;
        return(0);
    }
    /*
     * attach to mbuf
     */
    MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, socow_iodone,
            (void*)sf_buf_kva(sf), sf, M_RDONLY, EXT_SFBUF);
    m0->m_len = len;
    m0->m_data = (caddr_t)sf_buf_kva(sf) + offset;
    socow_stats.success++;

    iov = uio->uio_iov;
    iov->iov_base = (char *)iov->iov_base + m0->m_len;
    iov->iov_len -= m0->m_len;
    uio->uio_resid -= m0->m_len;
    uio->uio_offset += m0->m_len;
    if (iov->iov_len == 0) {
        uio->uio_iov++;
        uio->uio_iovcnt--;
    }

    return(m0->m_len);
}
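
socow_iodone() is the external-buffer free routine registered with MEXTADD above. A sketch of it, assumed from the teardown already visible in the sf_buf_alloc() failure path (not the verbatim function):

/*
 * Assumed sketch of socow_iodone(): when the mbuf is freed, release
 * the sf_buf, clear the COW marking, unwire the page, and free it if
 * it became orphaned.
 */
static void
socow_iodone(void *addr, void *args)
{
    struct sf_buf *sf;
    vm_page_t pp;

    sf = args;
    pp = sf_buf_page(sf);
    sf_buf_free(sf);
    vm_page_lock(pp);
    vm_page_cowclear(pp);
    vm_page_unwire(pp, 0);
    if (pp->wire_count == 0 && pp->object == NULL)
        vm_page_free(pp);
    vm_page_unlock(pp);
}
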
Code example #8
File: vchiq_2835_arm.c Project: brd/vchiq-freebsd
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
                struct proc *p, BULKINFO_T *bi)
{
    PAGELIST_T *pagelist;
    vm_page_t* pages;
    unsigned long *addrs;
    unsigned int num_pages, i;
    vm_offset_t offset;
    int pagelist_size;
    char *addr, *base_addr, *next_addr;
    int run, addridx, actual_pages;
    int err;
    vm_paddr_t pagelist_phys;

    offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
    num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

    bi->pagelist = NULL;
    bi->buf = buf;
    bi->size = count;

    /* Allocate enough storage to hold the page pointers and the page
    ** list
    */
    pagelist_size = sizeof(PAGELIST_T) +
                    (num_pages * sizeof(unsigned long)) +
                    (num_pages * sizeof(pages[0]));

    err = bus_dma_tag_create(
              NULL,                       /* parent */
              PAGE_SIZE, 0,               /* alignment, boundary */
              BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
              BUS_SPACE_MAXADDR,          /* highaddr */
              NULL, NULL,                 /* filter, filterarg */
              pagelist_size, 1,           /* maxsize, nsegments */
              pagelist_size, 0,           /* maxsegsize, flags */
              NULL, NULL,                 /* lockfunc, lockarg */
              &bi->pagelist_dma_tag);
    if (err) {
        vchiq_log_error(vchiq_core_log_level,
            "Unable to create DMA tag for pagelist");
        return (-ENOMEM);
    }
    err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
                           BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
    if (err) {
        vchiq_log_error(vchiq_core_log_level, "Unable to allocate pagelist memory");
        err = -ENOMEM;
        goto failed_alloc;
    }

    err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map, pagelist,
                          pagelist_size, vchiq_dmamap_cb,
                          &pagelist_phys, 0);

    if (err) {
        vchiq_log_error(vchiq_core_log_level, "cannot load DMA map for pagelist memory");
        err = -ENOMEM;
        goto failed_load;
    }

    vchiq_log_trace(vchiq_arm_log_level,
                    "create_pagelist - %x", (unsigned int)pagelist);

    addrs = pagelist->addrs;
    pages = (vm_page_t*)(addrs + num_pages);

    actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
                   (vm_offset_t)buf, count,
                   (type == PAGELIST_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, pages, num_pages);

    if (actual_pages != num_pages) {
        vm_page_unhold_pages(pages, actual_pages);
        err = -ENOMEM;
        goto failed_hold;
    }

    pagelist->length = count;
    pagelist->type = type;
    pagelist->offset = offset;

    /* Group the pages into runs of contiguous pages */

    base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
    next_addr = base_addr + PAGE_SIZE;
    addridx = 0;
    run = 0;

    for (i = 1; i < num_pages; i++) {
        addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
        if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
            next_addr += PAGE_SIZE;
            run++;
        } else {
            addrs[addridx] = (unsigned long)base_addr + run;
            addridx++;
            base_addr = addr;
            next_addr = addr + PAGE_SIZE;
            run = 0;
        }
    }

    addrs[addridx] = (unsigned long)base_addr + run;
    addridx++;

    /* Partial cache lines (fragments) require special measures */
    if ((type == PAGELIST_READ) &&
            ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
             ((pagelist->offset + pagelist->length) &
              (CACHE_LINE_SIZE - 1)))) {
        FRAGMENTS_T *fragments;

        if (down_interruptible(&g_free_fragments_sema) != 0) {
            vm_page_unhold_pages(pages, num_pages);
            err = -EINTR;
            goto failed_hold;
        }

        WARN_ON(g_free_fragments == NULL);

        down(&g_free_fragments_mutex);
        fragments = (FRAGMENTS_T *) g_free_fragments;
        WARN_ON(fragments == NULL);
        g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
        up(&g_free_fragments_mutex);
        pagelist->type =
            PAGELIST_READ_WITH_FRAGMENTS + (fragments -
                                            g_fragments_base);
    }

    /* XXX: optimize? INV operation for read WBINV for write? */
    cpu_dcache_wbinv_range((vm_offset_t)buf, count);

    bi->pagelist = pagelist;

    return 0;

failed_hold:
    bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_load:
    bus_dmamem_free(bi->pagelist_dma_tag, pagelist, bi->pagelist_dma_map);
failed_alloc:
    bus_dma_tag_destroy(bi->pagelist_dma_tag);

    return (err);
}
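
Once the bulk transfer completes, everything acquired above must be released: the page holds, the DMA map load, the DMA memory, and the tag. A sketch of that teardown (hypothetical helper; the driver's free path does the equivalent):

/*
 * Hypothetical teardown helper: release the page holds, then unwind
 * the busdma resources in the reverse order of their creation.
 */
static void
destroy_pagelist(BULKINFO_T *bi, unsigned int num_pages)
{
    PAGELIST_T *pagelist = bi->pagelist;
    vm_page_t *pages = (vm_page_t *)(pagelist->addrs + num_pages);

    vm_page_unhold_pages(pages, num_pages);
    bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
    bus_dmamem_free(bi->pagelist_dma_tag, pagelist, bi->pagelist_dma_map);
    bus_dma_tag_destroy(bi->pagelist_dma_tag);
}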