Example #1
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		       void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
			   dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
			   user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
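	/*
	 * The legacy get_user_pages() variant used here requires the
	 * caller to hold mmap_sem for read, hence the down_read/up_read.
	 */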
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm,
			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
	up_read(&current->mm->mmap_sem);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
			   err, user_dma.page_count);
		/* Release any pages that were pinned before the failure */
		for (i = 0; i < err; i++)
			put_page(dma->map[i]);
		return err < 0 ? err : -EINVAL;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
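	/* pci_map_sg() may merge entries, so SG_length can be smaller than page_count */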
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
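
The ivtv_udma_get_page_info() helper used above is not shown in this example. As a rough sketch only, under the assumption that it simply computes the page span covered by the user buffer (this is not the driver's actual implementation), it could look like this:

static void example_get_page_info(struct ivtv_dma_page_info *info,
				  unsigned long uaddr, int size)
{
	/* Hedged sketch, not the real ivtv_udma_get_page_info() */
	info->uaddr = uaddr & PAGE_MASK;	/* page-aligned start address */
	info->offset = uaddr & ~PAGE_MASK;	/* offset of the data in the first page */

	if (size <= 0) {
		info->page_count = 0;
		return;
	}

	/* number of pages touched by [uaddr, uaddr + size) */
	info->page_count = ((uaddr + size - 1) >> PAGE_SHIFT) -
			   (uaddr >> PAGE_SHIFT) + 1;
}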
Example #2
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				 struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;

	int i;
	int y_pages, uv_pages;

	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;
	int frame = atomic_read(&itv->yuv_info.next_fill_frame);

	y_buffer_offset = IVTV_DEC_MEM_START + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = args->src.height + args->src.top;

	if (y_decode_height < 512-16)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
	ivtv_udma_get_page_info(&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
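	/* Both planes share map[]: the UV pages follow the Y pages, starting at index y_pages */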
	down_read(&current->mm->mmap_sem);
	y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
	uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL);
	up_read(&current->mm->mmap_sem);

	dma->page_count = y_dma.page_count + uv_dma.page_count;

	if (y_pages + uv_pages != dma->page_count) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				y_pages + uv_pages, dma->page_count);

		/* Release only the pages that were actually pinned */
		for (i = 0; i < y_pages + uv_pages; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill & map SG List */
	ivtv_udma_fill_sg_list(dma, &uv_dma, ivtv_udma_fill_sg_list(dma, &y_dma, 0));
	dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (args->src.height + args->src.top < 512-16) {
		if (itv->yuv_info.blanking_dmaptr) {
			dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
			dma->SGarray[dma->SG_length].src = cpu_to_le32(itv->yuv_info.blanking_dmaptr);
			dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DEC_MEM_START + yuv_offset[frame]);
			dma->SG_length++;
		}
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return 0;
}
Example #3
static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				  struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	int i;
	int y_pages, uv_pages;
	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;

	y_buffer_offset = IVTV_DECODER_OFFSET + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = f->src_h + f->src_y;

	if (f->offset_y)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN
		    ("prep_user_dma: SG_length %d page_count %d still full?\n",
		     dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
	ivtv_udma_get_page_info(&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
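	/* get_user_pages_unlocked() takes and drops the mmap lock itself, so no down_read()/up_read() is needed */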
	y_pages = get_user_pages_unlocked(y_dma.uaddr,
			y_dma.page_count, &dma->map[0], FOLL_FORCE);
	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
	if (y_pages == y_dma.page_count) {
		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
				uv_dma.page_count, &dma->map[y_pages],
				FOLL_FORCE);
	}

	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
		int rc = -EFAULT;

		if (y_pages == y_dma.page_count) {
			IVTV_DEBUG_WARN
				("failed to map uv user pages, returned %d "
				 "expecting %d\n", uv_pages, uv_dma.page_count);

			if (uv_pages >= 0) {
				for (i = 0; i < uv_pages; i++)
					put_page(dma->map[y_pages + i]);
				rc = -EFAULT;
			} else {
				rc = uv_pages;
			}
		} else {
			IVTV_DEBUG_WARN
				("failed to map y user pages, returned %d "
				 "expecting %d\n", y_pages, y_dma.page_count);
		}
		if (y_pages >= 0) {
			for (i = 0; i < y_pages; i++)
				put_page(dma->map[i]);
			/*
			 * Inherit the -EFAULT from rc's
			 * initialization, but allow it to be
			 * overridden by uv_pages above if it was an
			 * actual errno.
			 */
		} else {
			rc = y_pages;
		}
		return rc;
	}

	dma->page_count = y_pages + uv_pages;

	/* Fill & map SG List */
	if (ivtv_udma_fill_sg_list(dma, &uv_dma, ivtv_udma_fill_sg_list(dma, &y_dma, 0)) < 0) {
		IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (f->offset_y && yi->blanking_dmaptr) {
		dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
		dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
		dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
		dma->SG_length++;
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return 0;
}
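
For comparison only: on current kernels, pinning user pages for DMA as in the examples above is normally done with the pin_user_pages family and undone with unpin_user_pages(), rather than get_user_pages()/put_page(). The helper below is a minimal, hedged sketch of that pattern; it is not part of the ivtv driver, and the function name is made up for illustration:

#include <linux/mm.h>

/* Hedged sketch: pin a user buffer that a device will read, on a recent kernel */
static int example_pin_user_buffer(unsigned long uaddr, long page_count,
				   struct page **pages)
{
	long pinned = pin_user_pages_unlocked(uaddr, page_count, pages, 0);

	if (pinned != page_count) {
		if (pinned > 0)
			unpin_user_pages(pages, pinned);	/* drop any partial pins */
		return pinned < 0 ? (int)pinned : -EFAULT;
	}
	/* on success the caller must later call unpin_user_pages(pages, page_count) */
	return 0;
}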