/*
 * Handle actual transfers of data.
 */
static int sbullr_transfer (Sbull_Dev *dev, char *buf, size_t count,
                loff_t *offset, int rw)
{
    struct kiobuf *iobuf;
    int result;

    /* Only block alignment and size allowed */
    if ((*offset & SBULLR_SECTOR_MASK) || (count & SBULLR_SECTOR_MASK))
        return -EINVAL;
    if ((unsigned long) buf & SBULLR_SECTOR_MASK)
        return -EINVAL;

    /* Allocate an I/O vector */
    result = alloc_kiovec(1, &iobuf);
    if (result)
        return result;

    /* Map the user I/O buffer and do the I/O. */
    result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
    if (result) {
        free_kiovec(1, &iobuf);
        return result;
    }
    spin_lock(&dev->lock);
    result = sbullr_rw_iovec(dev, iobuf, rw, *offset >> SBULLR_SECTOR_SHIFT,
                    count >> SBULLR_SECTOR_SHIFT);
    spin_unlock(&dev->lock);

    /* Clean up and return. */
    unmap_kiobuf(iobuf);
    free_kiovec(1, &iobuf);
    if (result > 0)
        *offset += result << SBULLR_SECTOR_SHIFT;
    return result << SBULLR_SECTOR_SHIFT;
}
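For context, the raw device's read and write methods are thin wrappers that look up the device from the minor number and delegate to sbullr_transfer() with the appropriate direction. A minimal sketch along the lines of the sbull example follows; the sbull_devices array and the 2.4-era file_operations prototypes are assumed here, not shown above.

/*
 * Sketch only: read/write entry points assumed to delegate to
 * sbullr_transfer(); sbull_devices is an assumed per-minor device array.
 */
ssize_t sbullr_read(struct file *filp, char *buf, size_t size, loff_t *off)
{
    Sbull_Dev *dev = sbull_devices + MINOR(filp->f_dentry->d_inode->i_rdev);
    return sbullr_transfer(dev, buf, size, off, READ);
}

ssize_t sbullr_write(struct file *filp, const char *buf, size_t size,
                loff_t *off)
{
    Sbull_Dev *dev = sbull_devices + MINOR(filp->f_dentry->d_inode->i_rdev);
    return sbullr_transfer(dev, (char *) buf, size, off, WRITE);
}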
int
VMCIHost_GetUserMemory(PageStoreAttachInfo *attach, // IN/OUT
                       VMCIQueue *produceQ,         // OUT
                       VMCIQueue *consumeQ)         // OUT
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
   int retval;
   int err = VMCI_SUCCESS;

   attach->producePages =
      VMCI_AllocKernelMem(attach->numProducePages * sizeof attach->producePages[0],
                          VMCI_MEMORY_NORMAL);
   if (attach->producePages == NULL) {
      return VMCI_ERROR_NO_MEM;
   }
   attach->consumePages =
      VMCI_AllocKernelMem(attach->numConsumePages * sizeof attach->consumePages[0],
                          VMCI_MEMORY_NORMAL);
   if (attach->consumePages == NULL) {
      err = VMCI_ERROR_NO_MEM;
      goto errorDealloc;
   }

   down_write(&current->mm->mmap_sem);
   retval = get_user_pages(current,
                           current->mm,
                           (VA)attach->produceBuffer,
                           attach->numProducePages,
                           1, 0,
                           attach->producePages,
                           NULL);
   if (retval < attach->numProducePages) {
      Log("get_user_pages(produce) failed: %d\n", retval);
      if (retval > 0) {
         int i;
         for (i = 0; i < retval; i++) {
            page_cache_release(attach->producePages[i]);
         }
      }
      err = VMCI_ERROR_NO_MEM;
      goto out;
   }

   retval = get_user_pages(current,
                           current->mm,
                           (VA)attach->consumeBuffer,
                           attach->numConsumePages,
                           1, 0,
                           attach->consumePages,
                           NULL);
   if (retval < attach->numConsumePages) {
      int i;
      Log("get_user_pages(consume) failed: %d\n", retval);
      if (retval > 0) {
         for (i = 0; i < retval; i++) {
            page_cache_release(attach->consumePages[i]);
         }
      }
      for (i = 0; i < attach->numProducePages; i++) {
         page_cache_release(attach->producePages[i]);
      }
      err = VMCI_ERROR_NO_MEM;
   }

   if (err == VMCI_SUCCESS) {
      produceQ->queueHeaderPtr = kmap(attach->producePages[0]);
      produceQ->page = &attach->producePages[1];
      consumeQ->queueHeaderPtr = kmap(attach->consumePages[0]);
      consumeQ->page = &attach->consumePages[1];
   }

out:
   up_write(&current->mm->mmap_sem);

errorDealloc:
   if (err < VMCI_SUCCESS) {
      if (attach->producePages != NULL) {
         VMCI_FreeKernelMem(attach->producePages,
                            attach->numProducePages * sizeof attach->producePages[0]);
      }
      if (attach->consumePages != NULL) {
         VMCI_FreeKernelMem(attach->consumePages,
                            attach->numConsumePages * sizeof attach->consumePages[0]);
      }
   }

   return err;

#else
   /*
    * Host queue pair support for earlier kernels temporarily
    * disabled. See bug 365496.
    */
   ASSERT_NOT_IMPLEMENTED(FALSE);

#if 0
   attach->produceIoBuf = VMCI_AllocKernelMem(sizeof *attach->produceIoBuf,
                                              VMCI_MEMORY_NORMAL);
   if (attach->produceIoBuf == NULL) {
      return VMCI_ERROR_NO_MEM;
   }

   attach->consumeIoBuf = VMCI_AllocKernelMem(sizeof *attach->consumeIoBuf,
                                              VMCI_MEMORY_NORMAL);
   if (attach->consumeIoBuf == NULL) {
      VMCI_FreeKernelMem(attach->produceIoBuf, sizeof *attach->produceIoBuf);
      return VMCI_ERROR_NO_MEM;
   }

   retval = map_user_kiobuf(WRITE, attach->produceIoBuf,
                            (VA)attach->produceBuffer,
                            attach->numProducePages * PAGE_SIZE);
   if (retval < 0) {
      err = VMCI_ERROR_NO_ACCESS;
      goto out;
   }

   retval = map_user_kiobuf(WRITE, attach->consumeIoBuf,
                            (VA)attach->consumeBuffer,
                            attach->numConsumePages * PAGE_SIZE);
   if (retval < 0) {
      unmap_kiobuf(attach->produceIoBuf);
      err = VMCI_ERROR_NO_ACCESS;
   }

   if (err == VMCI_SUCCESS) {
      produceQ->queueHeaderPtr = kmap(attach->produceIoBuf->maplist[0]);
      produceQ->page = &attach->produceIoBuf->maplist[1];
      consumeQ->queueHeaderPtr = kmap(attach->consumeIoBuf->maplist[0]);
      consumeQ->page = &attach->consumeIoBuf->maplist[1];
   }

out:
   if (err < VMCI_SUCCESS) {
      if (attach->produceIoBuf != NULL) {
         VMCI_FreeKernelMem(attach->produceIoBuf, sizeof *attach->produceIoBuf);
      }
      if (attach->consumeIoBuf != NULL) {
         VMCI_FreeKernelMem(attach->consumeIoBuf, sizeof *attach->consumeIoBuf);
      }
   }

   return err;
#else // 0 -- Instead just return FALSE
   return FALSE;
#endif // 0

#endif // Linux version >= 2.6.0
}
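The pages pinned by get_user_pages() above must eventually be unmapped and released when the queue pair is detached. The sketch below is a hypothetical teardown helper, not the driver's actual release routine; it assumes the same PageStoreAttachInfo layout and that attachment succeeded as in VMCIHost_GetUserMemory.

/*
 * Hypothetical teardown sketch: drop the kmap() of the header pages, mark
 * the pinned pages dirty so host-side writes reach backing store, release
 * the references taken by get_user_pages(), and free the page arrays.
 */
static void
VMCIHostReleaseUserMemorySketch(PageStoreAttachInfo *attach, // IN/OUT
                                VMCIQueue *produceQ,         // IN/OUT
                                VMCIQueue *consumeQ)         // IN/OUT
{
   int i;

   kunmap(attach->producePages[0]);
   kunmap(attach->consumePages[0]);
   produceQ->queueHeaderPtr = NULL;
   consumeQ->queueHeaderPtr = NULL;

   for (i = 0; i < attach->numProducePages; i++) {
      set_page_dirty(attach->producePages[i]);
      page_cache_release(attach->producePages[i]);
   }
   for (i = 0; i < attach->numConsumePages; i++) {
      set_page_dirty(attach->consumePages[i]);
      page_cache_release(attach->consumePages[i]);
   }

   VMCI_FreeKernelMem(attach->producePages,
                      attach->numProducePages * sizeof attach->producePages[0]);
   VMCI_FreeKernelMem(attach->consumePages,
                      attach->numConsumePages * sizeof attach->consumePages[0]);
}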