Example No. 1
/*
 * Handle actual transfers of data.
 */
static int sbullr_transfer (Sbull_Dev *dev, char *buf, size_t count,
                loff_t *offset, int rw)
{
    struct kiobuf *iobuf;       
    int result;
    
    /* Only block alignment and size allowed */
    if ((*offset & SBULLR_SECTOR_MASK) || (count & SBULLR_SECTOR_MASK))
        return -EINVAL;
    if ((unsigned long) buf & SBULLR_SECTOR_MASK)
        return -EINVAL;

    /* Allocate an I/O vector */
    result = alloc_kiovec(1, &iobuf);
    if (result)
        return result;

    /* Map the user I/O buffer and do the I/O. */
    result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
    if (result) {
        free_kiovec(1, &iobuf);
        return result;
    }
    spin_lock(&dev->lock);
    result = sbullr_rw_iovec(dev, iobuf, rw, *offset >> SBULLR_SECTOR_SHIFT,
                    count >> SBULLR_SECTOR_SHIFT);
    spin_unlock(&dev->lock);

    /* Clean up and return. */
    unmap_kiobuf(iobuf);
    free_kiovec(1, &iobuf);
    if (result > 0)
        *offset += result << SBULLR_SECTOR_SHIFT;
    return result << SBULLR_SECTOR_SHIFT;
}
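As context for Example No. 1, the transfer routine above is normally invoked from the raw device's read and write file operations, which differ only in the direction flag they pass. A minimal sketch of those wrappers, assuming a sbull_devices array and the usual minor-number lookup (the wrapper names and the lookup details are illustrative, not taken verbatim from the source):

/*
 * Illustrative read/write methods for the raw (char) interface.
 * Only the call into sbullr_transfer() mirrors the example above.
 */
static ssize_t sbullr_read(struct file *filp, char *buf, size_t size,
                           loff_t *off)
{
    Sbull_Dev *dev = sbull_devices + MINOR(filp->f_dentry->d_inode->i_rdev);
    return sbullr_transfer(dev, buf, size, off, READ);
}

static ssize_t sbullr_write(struct file *filp, const char *buf, size_t size,
                            loff_t *off)
{
    Sbull_Dev *dev = sbull_devices + MINOR(filp->f_dentry->d_inode->i_rdev);
    /* sbullr_transfer() takes a plain char * for both directions. */
    return sbullr_transfer(dev, (char *) buf, size, off, WRITE);
}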
Example No. 2
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	int pgcount, err;
	struct mm_struct *	mm;
	
	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");
	
	pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
	/* mapping 0 bytes is not permitted */
	if (!pgcount) BUG();
	err = expand_kiobuf(iobuf, pgcount);
	if (err)
		return err;

	iobuf->locked = 0;
	iobuf->offset = va & (PAGE_SIZE-1);
	iobuf->length = len;
	
	/* Try to fault in all of the necessary pages */
	down_read(&mm->mmap_sem);
	/* rw==READ means read from disk, write into memory area */
	err = get_user_pages(current, mm, va, pgcount,
			(rw==READ), 0, iobuf->maplist, NULL);
	up_read(&mm->mmap_sem);
	if (err < 0) {
		unmap_kiobuf(iobuf);
		dprintk ("map_user_kiobuf: end %d\n", err);
		return err;
	}
	iobuf->nr_pages = err;
	while (pgcount--) {
		/* FIXME: the flush is superfluous for rw==READ,
		 * and probably the wrong function for rw==WRITE
		 */
		flush_dcache_page(iobuf->maplist[pgcount]);
	}
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;
}
Example No. 3
int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
{
	unsigned long		ptr, end;
	int			err;
	struct mm_struct *	mm;
	struct vm_area_struct *	vma = 0;
	struct page *		map;
	int			i;
	int			datain = (rw == READ);
	
	/* Make sure the iobuf is not already mapped somewhere. */
	if (iobuf->nr_pages)
		return -EINVAL;

	mm = current->mm;
	dprintk ("map_user_kiobuf: begin\n");
	
	ptr = va & PAGE_MASK;
	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
	if (err)
		return err;

	down(&mm->mmap_sem);

	err = -EFAULT;
	iobuf->locked = 0;
	iobuf->offset = va & ~PAGE_MASK;
	iobuf->length = len;
	
	i = 0;
	
	/* 
	 * First of all, try to fault in all of the necessary pages
	 */
	while (ptr < end) {
		if (!vma || ptr >= vma->vm_end) {
			vma = find_vma(current->mm, ptr);
			if (!vma) 
				goto out_unlock;
			if (vma->vm_start > ptr) {
				if (!(vma->vm_flags & VM_GROWSDOWN))
					goto out_unlock;
				if (expand_stack(vma, ptr))
					goto out_unlock;
			}
			if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
					(!(vma->vm_flags & VM_READ))) {
				err = -EACCES;
				goto out_unlock;
			}
		}
		if (handle_mm_fault(current->mm, vma, ptr, datain) <= 0) 
			goto out_unlock;
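		/* The page is now resident: look it up under the page table
		 * lock and take a reference so it stays pinned for the I/O. */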
		spin_lock(&mm->page_table_lock);
		map = follow_page(ptr);
		if (!map) {
			spin_unlock(&mm->page_table_lock);
			dprintk (KERN_ERR "Missing page in map_user_kiobuf\n");
			goto out_unlock;
		}
		map = get_page_map(map);
		if (map)
			atomic_inc(&map->count);
		else
			printk (KERN_INFO "Mapped page missing [%d]\n", i);
		spin_unlock(&mm->page_table_lock);
		iobuf->maplist[i] = map;
		iobuf->nr_pages = ++i;
		
		ptr += PAGE_SIZE;
	}

	up(&mm->mmap_sem);
	dprintk ("map_user_kiobuf: end OK\n");
	return 0;

 out_unlock:
	up(&mm->mmap_sem);
	unmap_kiobuf(iobuf);
	dprintk ("map_user_kiobuf: end %d\n", err);
	return err;
}
Example No. 4
void
VMCIHost_ReleaseUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                           VMCIQueue *produceQ,              // OUT
                           VMCIQueue *consumeQ)              // OUT
{

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int i;

    ASSERT(attach->producePages);
    ASSERT(attach->consumePages);

    /*
     * Only the first page of each queue was mapped with kmap() in
     * VMCIHost_GetUserMemory(); undo those mappings before releasing
     * the pinned pages.
     */
    kunmap(attach->producePages[0]);
    kunmap(attach->consumePages[0]);

    for (i = 0; i < attach->numProducePages; i++) {
        ASSERT(attach->producePages[i]);

        set_page_dirty(attach->producePages[i]);
        page_cache_release(attach->producePages[i]);
    }

    for (i = 0; i < attach->numConsumePages; i++) {
        ASSERT(attach->consumePages[i]);

        set_page_dirty(attach->consumePages[i]);
        page_cache_release(attach->consumePages[i]);
    }

    VMCI_FreeKernelMem(attach->producePages,
                       attach->numProducePages *
                       sizeof attach->producePages[0]);
    VMCI_FreeKernelMem(attach->consumePages,
                       attach->numConsumePages *
                       sizeof attach->consumePages[0]);
#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    kunmap(attach->produceIoBuf->maplist[0]);
    kunmap(attach->consumeIoBuf->maplist[0]);

    mark_dirty_kiobuf(attach->produceIoBuf,
                      attach->numProducePages * PAGE_SIZE);
    unmap_kiobuf(attach->produceIoBuf);

    mark_dirty_kiobuf(attach->consumeIoBuf,
                      attach->numConsumePages * PAGE_SIZE);
    unmap_kiobuf(attach->consumeIoBuf);

    VMCI_FreeKernelMem(attach->produceIoBuf,
                       sizeof *attach->produceIoBuf);
    VMCI_FreeKernelMem(attach->consumeIoBuf,
                       sizeof *attach->consumeIoBuf);
#endif
#endif
}
Example No. 5
int
VMCIHost_GetUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                       VMCIQueue *produceQ,              // OUT
                       VMCIQueue *consumeQ)              // OUT
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int retval;
    int err = VMCI_SUCCESS;


    attach->producePages =
        VMCI_AllocKernelMem(attach->numProducePages * sizeof attach->producePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->producePages == NULL) {
        return VMCI_ERROR_NO_MEM;
    }
    attach->consumePages =
        VMCI_AllocKernelMem(attach->numConsumePages * sizeof attach->consumePages[0],
                            VMCI_MEMORY_NORMAL);
    if (attach->consumePages == NULL) {
        err = VMCI_ERROR_NO_MEM;
        goto errorDealloc;
    }

    down_write(&current->mm->mmap_sem);
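    /*
     * Pin the user-space produce buffer: get_user_pages() takes a
     * reference on each page, which VMCIHost_ReleaseUserMemory() drops.
     */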
    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->produceBuffer,
                            attach->numProducePages,
                            1, 0,
                            attach->producePages,
                            NULL);
    if (retval < attach->numProducePages) {
        Log("get_user_pages(produce) failed: %d\n", retval);
        if (retval > 0) {
            int i;
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->producePages[i]);
            }
        }
        err = VMCI_ERROR_NO_MEM;
        goto out;
    }

    retval = get_user_pages(current,
                            current->mm,
                            (VA)attach->consumeBuffer,
                            attach->numConsumePages,
                            1, 0,
                            attach->consumePages,
                            NULL);
    if (retval < attach->numConsumePages) {
        int i;
        Log("get_user_pages(consume) failed: %d\n", retval);
        if (retval > 0) {
            for (i = 0; i < retval; i++) {
                page_cache_release(attach->consumePages[i]);
            }
        }
        for (i = 0; i < attach->numProducePages; i++) {
            page_cache_release(attach->producePages[i]);
        }
        err = VMCI_ERROR_NO_MEM;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->producePages[0]);
        produceQ->page = &attach->producePages[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumePages[0]);
        consumeQ->page = &attach->consumePages[1];
    }

out:
    up_write(&current->mm->mmap_sem);

errorDealloc:
    if (err < VMCI_SUCCESS) {
        if (attach->producePages != NULL) {
            VMCI_FreeKernelMem(attach->producePages,
                               attach->numProducePages *
                               sizeof attach->producePages[0]);
        }
        if (attach->consumePages != NULL) {
            VMCI_FreeKernelMem(attach->consumePages,
                               attach->numConsumePages *
                               sizeof attach->consumePages[0]);
        }
    }

    return err;

#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    attach->produceIoBuf = VMCI_AllocKernelMem(sizeof *attach->produceIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->produceIoBuf == NULL) {
        return VMCI_ERROR_NO_MEM;
    }

    attach->consumeIoBuf = VMCI_AllocKernelMem(sizeof *attach->consumeIoBuf,
                           VMCI_MEMORY_NORMAL);
    if (attach->consumeIoBuf == NULL) {
        VMCI_FreeKernelMem(attach->produceIoBuf,
                           sizeof *attach->produceIoBuf);
        return VMCI_ERROR_NO_MEM;
    }

    retval = map_user_kiobuf(WRITE, attach->produceIoBuf,
                             (VA)attach->produceBuffer,
                             attach->numProducePages * PAGE_SIZE);
    if (retval < 0) {
        err = VMCI_ERROR_NO_ACCESS;
        goto out;
    }

    retval = map_user_kiobuf(WRITE, attach->consumeIoBuf,
                             (VA)attach->consumeBuffer,
                             attach->numConsumePages * PAGE_SIZE);
    if (retval < 0) {
        unmap_kiobuf(attach->produceIoBuf);
        err = VMCI_ERROR_NO_ACCESS;
    }

    if (err == VMCI_SUCCESS) {
        produceQ->queueHeaderPtr = kmap(attach->produceIoBuf->maplist[0]);
        produceQ->page = &attach->produceIoBuf->maplist[1];
        consumeQ->queueHeaderPtr = kmap(attach->consumeIoBuf->maplist[0]);
        consumeQ->page = &attach->consumeIoBuf->maplist[1];
    }

out:

    if (err < VMCI_SUCCESS) {
        if (attach->produceIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->produceIoBuf,
                               sizeof *attach->produceIoBuf);
        }
        if (attach->consumeIoBuf != NULL) {
            VMCI_FreeKernelMem(attach->consumeIoBuf,
                               sizeof *attach->consumeIoBuf);
        }
    }

    return err;
#else // 0 -- Instead just return FALSE
    return FALSE;
#endif // 0
#endif // Linux version >= 2.6.0
}
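Examples No. 4 and No. 5 are the two halves of the same pattern: pin user pages with get_user_pages() while holding mmap_sem, use them, then mark them dirty and drop the references. A minimal, self-contained sketch of that pattern on a 2.6-era kernel (the helper name and the reduced error handling are illustrative; the get_user_pages() signature is the one used above):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/*
 * Pin npages of a user buffer for writing, use them, then release.
 * mmap_sem is taken for reading, which suffices for get_user_pages().
 */
static int pin_use_release(unsigned long uaddr, int npages, struct page **pages)
{
    int got, i;

    down_read(&current->mm->mmap_sem);
    got = get_user_pages(current, current->mm, uaddr, npages,
                         1 /* write */, 0 /* force */, pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (got < npages) {
        /* Partial pin (or error): drop whatever was pinned and fail. */
        for (i = 0; i < got; i++)
            page_cache_release(pages[i]);
        return -ENOMEM;
    }

    /* ... kmap() pages[i] and read or write their contents here ... */

    for (i = 0; i < npages; i++) {
        set_page_dirty(pages[i]);      /* the pages may have been written */
        page_cache_release(pages[i]);  /* drop the reference from the pin */
    }
    return 0;
}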