Example no. 1
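/*
 * complete_rx() runs when an OUT (RX) DMA transfer finishes on an endpoint:
 * it reads the DOEPTSIZ residue to work out how many bytes actually arrived,
 * updates the current request, and then either arms a zero-length packet on
 * EP0 or completes the request and starts the next queued OUT transfer.
 */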
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, xfer_length, is_short = 0;

	if (list_empty(&ep->queue)) {
		DEBUG_OUT_EP("%s: RX DMA done : NULL REQ on OUT EP-%d\n",
					__func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);

	/* DOEPTSIZ now holds the residue, i.e. the bytes left untransferred;
	 * EP0 uses a narrower transfer-size field, hence the smaller mask. */
	ep_tsr = readl(S3C_UDC_OTG_DOEPTSIZ(ep_num));

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & 0x7f);
	else
		xfer_size = (ep_tsr & 0x7fff);

	__dma_single_cpu_to_dev(req->req.buf, req->req.length, DMA_FROM_DEVICE);
	xfer_length = req->req.length - xfer_size;
	req->req.actual += min(xfer_length, req->req.length - req->req.actual);
	is_short = (xfer_length < ep->ep.maxpacket);

	DEBUG_OUT_EP("%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		     "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
			__func__, ep_num, req->req.actual, req->req.length,
			is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == xfer_length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			DEBUG_OUT_EP("	=> Send ZLP\n");
			dev->ep0state = WAIT_FOR_SETUP;
			s3c_udc_ep0_zlp();
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				DEBUG_OUT_EP("%s: Next Rx request start...\n",
					 __func__);
				setdma_rx(ep, req);
			}
		}
	}
}
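The byte accounting above is easy to misread: xfer_size is what is left in DOEPTSIZ after the DMA, not what arrived. A minimal userspace sketch of the same arithmetic with made-up numbers (the names mirror the driver fields, but nothing here touches hardware):

#include <stdio.h>

int main(void)
{
	unsigned int req_length = 512;	/* req->req.length: bytes requested */
	unsigned int maxpacket  = 64;	/* ep->ep.maxpacket                 */
	unsigned int xfer_size  = 492;	/* DOEPTSIZ residue after the DMA   */

	unsigned int xfer_length = req_length - xfer_size;	/* 20 bytes received */
	int is_short = xfer_length < maxpacket;	/* 1: short read, the request
						   completes immediately */

	printf("received %u of %u bytes, is_short=%d\n",
	       xfer_length, req_length, is_short);
	return 0;
}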
Example no. 2
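/*
 * map_single_or_page() maps either a kernel-virtual buffer (ptr) or a
 * page+offset pair for DMA. If the resulting bus address does not fit the
 * device's DMA mask (or dma_needs_bounce() objects), the data is bounced
 * through a safe buffer; otherwise only the usual cache maintenance is done.
 */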
static inline dma_addr_t map_single_or_page(struct device *dev, void *ptr,
		struct page *page, unsigned long offset,  size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	if (page)
		dma_addr = page_to_dma(dev, page) + offset;
	else
		dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask - 1) | mask;
		limit = (limit + 1) & ~limit;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr & ~mask) ||
			(limit && (dma_addr + size > limit));
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, page, offset, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return ~0;
		}

		if (buf->page)
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				page_address(buf->page),
				page_to_dma(dev, buf->page),
				buf->safe, buf->safe_dma_addr);
		else
			dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped "
				"to %p (dma=%#x)\n", __func__,
				buf->ptr, virt_to_dma(dev, buf->ptr),
				buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			if (page)
				ptr = kmap_atomic(page, KM_BOUNCE_READ) + offset;
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
			wmb();
			if (page)
				kunmap_atomic(ptr - offset, KM_BOUNCE_READ);
		}
		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		if (page)
			__dma_page_cpu_to_dev(page, offset, size, dir);
		else
			__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
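The mask handling in map_single_or_page() is compact but not obvious. A rough standalone illustration with example numbers (plain userspace C, not driver code): for a contiguous 24-bit mask the two-step limit computation reduces to mask + 1, a 16 MiB window, and needs_bounce fires when the buffer starts above the mask or runs past that window.

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x00ffffff;	/* example *dev->dma_mask */
	unsigned long limit, dma_addr, size;

	/* Same two-step computation as in map_single_or_page(). */
	limit = (mask - 1) | mask;
	limit = (limit + 1) & ~limit;
	printf("limit = %#lx\n", limit);	/* 0x1000000 (16 MiB) */

	/* Buffer entirely above the mask: must bounce. */
	dma_addr = 0x30000000; size = 0x1000;
	printf("above mask: %d\n",
	       (dma_addr & ~mask) || (limit && dma_addr + size > limit));

	/* Buffer starts inside the window but runs past it: must bounce. */
	dma_addr = 0x00f00000; size = 0x200000;
	printf("past limit: %d\n",
	       (dma_addr & ~mask) || (limit && dma_addr + size > limit));
	return 0;
}

map_single() below is the pointer-only variant of the same path.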
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}
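map_single() collapses the bounce decision into one expression: OR-ing the addresses of the first and last byte and masking off the bits the DMA mask covers is non-zero exactly when either end of the buffer has address bits outside the mask. A small standalone check with example values (not driver code):

#include <stdio.h>

static int bounce_compact(unsigned long addr, unsigned long size,
			  unsigned long mask)
{
	/* The test used in map_single(). */
	return ((addr | (addr + size - 1)) & ~mask) != 0;
}

static int bounce_explicit(unsigned long addr, unsigned long size,
			   unsigned long mask)
{
	/* "Either end falls outside the mask", written out long-hand. */
	return (addr & ~mask) != 0 || ((addr + size - 1) & ~mask) != 0;
}

int main(void)
{
	unsigned long mask = 0x00ffffff;	/* example 24-bit DMA mask */
	struct { unsigned long addr, size; } t[] = {
		{ 0x00001000, 0x1000 },		/* fits: no bounce        */
		{ 0x00fff000, 0x2000 },		/* crosses 16 MiB: bounce */
		{ 0x30000000, 0x1000 },		/* entirely above: bounce */
	};

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("addr=%#lx size=%#lx -> %d (long-hand %d)\n",
		       t[i].addr, t[i].size,
		       bounce_compact(t[i].addr, t[i].size, mask),
		       bounce_explicit(t[i].addr, t[i].size, mask));
	return 0;
}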