Example 1
0
/*
 *		dma map/unmap
 */
static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
	struct usb_request *req = &ureq->req;
	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pkt->pipe);
	struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
	enum dma_data_direction dir;
	int err;

	dir = usbhs_pipe_is_dir_host(pkt->pipe);

	if (!map) {
		/* tear-down path: unmapping cannot fail */
		usb_gadget_unmap_request(&gpriv->gadget, req, dir);
		return 0;
	}

	/* scatter/gather is not supported here */
	WARN_ON(req->num_sgs);

	err = usb_gadget_map_request(&gpriv->gadget, req, dir);
	if (err < 0)
		return err;

	pkt->dma = req->dma;

	return 0;
}
Example 2
0
/*
 *		dma map/unmap
 */
static int usbhsg_dma_map(struct device *dev,
			  struct usbhs_pkt *pkt,
			  enum dma_data_direction dir)
{
	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
	struct usb_request *req = &ureq->req;

	if (pkt->dma != DMA_ADDR_INVALID) {
		dev_err(dev, "dma is already mapped\n");
		return -EIO;
	}

	if (req->dma == DMA_ADDR_INVALID) {
		pkt->dma = dma_map_single(dev, pkt->buf, pkt->length, dir);
	} else {
		dma_sync_single_for_device(dev, req->dma, req->length, dir);
		pkt->dma = req->dma;
	}

	if (dma_mapping_error(dev, pkt->dma)) {
		dev_err(dev, "dma mapping error %x\n", pkt->dma);
		return -EIO;
	}

	return 0;
}
Example 3
0
static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pkt->pipe);

	/* propagate the transferred length back into the usb_request */
	ureq->req.actual = pkt->actual;

	usbhsg_queue_pop(uep, ureq, 0);
}
Example 4
0
static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pkt->pipe);
	unsigned long flags;

	/* record how many bytes were actually transferred */
	ureq->req.actual = pkt->actual;

	/* pop under the driver lock; skip if the pipe has no endpoint */
	usbhs_lock(priv, flags);
	if (uep)
		__usbhsg_queue_pop(uep, ureq, 0);
	usbhs_unlock(priv, flags);
}
Example 5
0
/*
 *
 *		usb_dcp_ops
 *
 */
static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
{
	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
	struct usbhs_pkt *pkt;

	/* flush every queued packet, completing each with -ECONNRESET */
	while ((pkt = usbhs_pkt_pop(pipe, NULL)) != NULL)
		usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);

	usbhs_pipe_disable(pipe);

	return 0;
}
Example 6
0
static int usbhsg_dma_unmap(struct device *dev,
			    struct usbhs_pkt *pkt,
			    enum dma_data_direction dir)
{
	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
	struct usb_request *req = &ureq->req;

	/* refuse to unmap a packet that was never mapped */
	if (pkt->dma == DMA_ADDR_INVALID) {
		dev_err(dev, "dma is not mapped\n");
		return -EIO;
	}

	if (req->dma != DMA_ADDR_INVALID) {
		/* request carries its own mapping: only sync back for the CPU */
		dma_sync_single_for_cpu(dev, req->dma, req->length, dir);
	} else {
		/* we created this mapping ourselves: tear it down */
		dma_unmap_single(dev, pkt->dma, pkt->length, dir);
	}

	pkt->dma = DMA_ADDR_INVALID;

	return 0;
}