static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}
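/*
 * The NULL/mm call above matches the original seven-argument form of
 * get_user_pages_unlocked() found in older kernels (pre-4.6 era; the
 * exact version range is an assumption).  A sketch of that prototype,
 * for reference:
 *
 *	long get_user_pages_unlocked(struct task_struct *tsk,
 *				     struct mm_struct *mm,
 *				     unsigned long start,
 *				     unsigned long nr_pages,
 *				     int write, int force,
 *				     struct page **pages);
 *
 * Passing pages == NULL, as async_pf_execute() does, asks GUP to fault
 * the page in without handing back a pinned reference; the async page
 * fault path only needs the fault resolved, not the page itself.
 */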
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		pr_devel(" slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
					      nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
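/*
 * The "careful with return values" merge above, restated as a
 * hypothetical standalone helper (not a kernel function): pages
 * already pinned by the fast path must not be discarded when the slow
 * path fails, so partial success is reported in preference to the
 * errno.
 */
static inline int gup_fast_merge_results(int nr, int ret)
{
	if (nr > 0) {
		if (ret < 0)
			ret = nr;	/* keep the pages already pinned */
		else
			ret += nr;	/* fast-path plus slow-path pages */
	}
	return ret;
}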
/**
 * process_vm_rw_single_vec - read/write pages from the task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct page pointer area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/* Get the pages we're interested in */
		pages = get_user_pages_unlocked(task, mm, pa, pages,
						vm_write, 0, process_pages);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}
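/*
 * A standalone check of the nr_pages arithmetic above, with
 * hypothetical values and an assumed 4 KiB page size: a 2-byte copy
 * starting at the last byte of a page must span two pages.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long addr = 0x1fff, len = 2;
	unsigned long nr_pages =
		(addr + len - 1) / page_size - addr / page_size + 1;

	printf("nr_pages = %lu\n", nr_pages);	/* prints "nr_pages = 2" */
	return 0;
}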
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n",
		       (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      0, 1, dma->map);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			for (i = 0; i < err; i++)
				put_page(dma->map[i]);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count,
				    PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
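/*
 * ivtv_udma_setup() above uses the intermediate five-argument form of
 * get_user_pages_unlocked(), after the tsk/mm arguments were dropped
 * but before write/force were folded into gup_flags (roughly the
 * 4.6-4.8 era; the exact range is an assumption):
 *
 *	long get_user_pages_unlocked(unsigned long start,
 *				     unsigned long nr_pages,
 *				     int write, int force,
 *				     struct page **pages);
 *
 * Here write == 0 and force == 1 request a read-only pin that
 * overrides the normal vma protection checks.
 */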
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				  struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	int i;
	int y_pages, uv_pages;
	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;

	y_buffer_offset = IVTV_DECODER_OFFSET + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = f->src_h + f->src_y;

	if (f->offset_y)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&y_dma, (unsigned long)args->y_source,
				720 * y_decode_height);
	ivtv_udma_get_page_info(&uv_dma, (unsigned long)args->uv_source,
				360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
	y_pages = get_user_pages_unlocked(y_dma.uaddr, y_dma.page_count,
					  &dma->map[0], FOLL_FORCE);
	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
	if (y_pages == y_dma.page_count) {
		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
						   uv_dma.page_count,
						   &dma->map[y_pages],
						   FOLL_FORCE);
	}

	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
		int rc = -EFAULT;

		if (y_pages == y_dma.page_count) {
			IVTV_DEBUG_WARN("failed to map uv user pages, returned %d expecting %d\n",
					uv_pages, uv_dma.page_count);

			if (uv_pages >= 0) {
				for (i = 0; i < uv_pages; i++)
					put_page(dma->map[y_pages + i]);
				rc = -EFAULT;
			} else {
				rc = uv_pages;
			}
		} else {
			IVTV_DEBUG_WARN("failed to map y user pages, returned %d expecting %d\n",
					y_pages, y_dma.page_count);
		}
		if (y_pages >= 0) {
			for (i = 0; i < y_pages; i++)
				put_page(dma->map[i]);
			/*
			 * Inherit the -EFAULT from rc's
			 * initialization, but allow it to be
			 * overridden by uv_pages above if it was an
			 * actual errno.
			 */
		} else {
			rc = y_pages;
		}
		return rc;
	}

	dma->page_count = y_pages + uv_pages;

	/* Fill & map SG List */
	if (ivtv_udma_fill_sg_list(dma, &uv_dma,
				   ivtv_udma_fill_sg_list(dma, &y_dma, 0)) < 0) {
		IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -ENOMEM;
	}
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count,
				    PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (f->offset_y && yi->blanking_dmaptr) {
		dma->SGarray[dma->SG_length].size = cpu_to_le32(720 * 16);
		dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
		dma->SGarray[dma->SG_length].dst =
			cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
		dma->SG_length++;
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);

	return 0;
}
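/*
 * ivtv_yuv_prep_user_dma() above uses the four-argument form in which
 * the old write/force ints were replaced by a gup_flags bitmask (4.9
 * and later kernels; the exact version is an assumption):
 *
 *	long get_user_pages_unlocked(unsigned long start,
 *				     unsigned long nr_pages,
 *				     struct page **pages,
 *				     unsigned int gup_flags);
 *
 * Under this scheme the (write = 0, force = 1) pair seen in
 * ivtv_udma_setup() maps to gup_flags == FOLL_FORCE, and a writable
 * pin would set FOLL_WRITE instead.
 */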