static void s3c24xx_serial_tx_dma_complete(void *args)
{
	struct s3c24xx_uart_port *ourport = args;
	struct uart_port *port = &ourport->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct dma_tx_state state;
	unsigned long flags;
	int count;

	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
	count = dma->tx_bytes_requested - state.residue;
	async_tx_ack(dma->tx_desc);

	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
				dma->tx_size, DMA_TO_DEVICE);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	ourport->tx_in_progress = 0;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	s3c24xx_serial_start_next_tx(ourport);
	spin_unlock_irqrestore(&port->lock, flags);
}
static int port_send(struct eth_device *edev, void *data, int len)
{
	struct port_priv *port = edev->priv;
	struct txdesc *txdesc = port->txdesc;
	u32 cmd_sts;
	int ret;

	/* flush transmit data */
	dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);

	txdesc->cmd_sts = TXDESC_OWNED_BY_DMA;
	txdesc->cmd_sts |= TXDESC_FIRST | TXDESC_LAST;
	txdesc->cmd_sts |= TXDESC_ZERO_PADDING | TXDESC_GEN_CRC;
	txdesc->buf_ptr = data;
	txdesc->byte_cnt = len;

	/* assign tx descriptor and issue send command */
	writel((u32)txdesc, port->regs + PORT_TCQDP(UTXQ));
	writel(BIT(UTXQ), port->regs + PORT_TQC);

	/* wait for packet transmit completion */
	ret = wait_on_timeout(TRANSFER_TIMEOUT,
			      (readl(&txdesc->cmd_sts) & TXDESC_OWNED_BY_DMA) == 0);

	dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);

	if (ret) {
		dev_err(&edev->dev, "transmit timeout\n");
		return ret;
	}

	cmd_sts = readl(&txdesc->cmd_sts);
	if ((cmd_sts & TXDESC_LAST) && (cmd_sts & TXDESC_ERROR)) {
		dev_err(&edev->dev, "transmit error %d\n",
			(cmd_sts & TXDESC_ERROR_MASK) >> TXDESC_ERROR_SHIFT);
		return ret;
	}

	/* packet sent successfully */
	return 0;
}
static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
					     dma_addr_t dma_handle,
					     size_t size,
					     int direction)
{
	dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
/*
 * This is always called in spinlock protected mode, so
 * modifying the timeout timer is safe here.
 */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
	struct hsu_dma_buffer *dbuf = &up->rxbuf;
	struct hsu_dma_chan *chan = up->rxc;
	struct uart_port *port = &up->port;
	struct tty_struct *tty = port->state->port.tty;
	int count;

	if (!tty)
		return;

	/*
	 * First find out how much has already been transferred, then check
	 * whether this is a timeout DMA irq. Push the trailing bytes up to
	 * the tty layer and re-enable the channel.
	 */

	/* Timeout IRQ, need to wait a bit, see Errata 2 */
	if (int_sts & 0xf00)
		udelay(2);

	/* Stop the channel */
	chan_writel(chan, HSU_CH_CR, 0x0);
	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
	if (!count) {
		/* Restart the channel before we leave */
		chan_writel(chan, HSU_CH_CR, 0x3);
		return;
	}

	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
				dbuf->dma_size, DMA_FROM_DEVICE);

	/*
	 * Head will only wrap around when we recycle the DMA buffer, and
	 * when that happens we explicitly set tail to 0. So head will
	 * always be greater than tail.
	 */
	tty_insert_flip_string(tty, dbuf->buf, count);
	port->icount.rx += count;

	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
				   dbuf->dma_size, DMA_FROM_DEVICE);

	/* Reprogram the channel */
	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(chan, HSU_CH_DCR, 0x1
					 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	tty_flip_buffer_push(tty);

	chan_writel(chan, HSU_CH_CR, 0x3);
}
static void mtk8250_dma_rx_complete(void *param)
{
	struct uart_8250_port *up = param;
	struct uart_8250_dma *dma = up->dma;
	struct mtk8250_data *data = up->port.private_data;
	struct tty_port *tty_port = &up->port.state->port;
	struct dma_tx_state state;
	unsigned char *ptr;
	int copied;

	dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	if (data->rx_status == DMA_RX_SHUTDOWN)
		return;

	if ((data->rx_pos + state.residue) <= dma->rx_size) {
		ptr = (unsigned char *)(data->rx_pos + dma->rx_buf);
		copied = tty_insert_flip_string(tty_port, ptr, state.residue);
	} else {
		ptr = (unsigned char *)(data->rx_pos + dma->rx_buf);
		copied = tty_insert_flip_string(tty_port, ptr,
						dma->rx_size - data->rx_pos);
		ptr = (unsigned char *)(dma->rx_buf);
		copied += tty_insert_flip_string(tty_port, ptr,
						 data->rx_pos + state.residue -
						 dma->rx_size);
	}
	up->port.icount.rx += copied;

	tty_flip_buffer_push(tty_port);

	mtk8250_rx_dma(up);
}
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret) {
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
				      struct tty_port *tty,
				      unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
static void ehca_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				     size_t size,
				     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
static inline void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
					   dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}
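/*
 * Helpers like this usually come in pairs: once the CPU has finished looking
 * at an RX descriptor buffer, ownership is handed back to the hardware with
 * the *_for_device variant. Below is a minimal counterpart sketch under the
 * same assumptions as the helper above; the name sync_descbuffer_for_device
 * is an assumption and not taken from this source.
 */
static inline void sync_descbuffer_for_device(struct b43_dmaring *ring,
					      dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	/* Hand the receive-direction buffer back to the device. */
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}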
/*
 * When receiving the RXQ done interrupt, qmu_interrupt calls this function.
 * 1. Traverse the GPD/BD data structures to count the actual transferred length.
 * 2. Set the done flag to notify rxstate_qmu() to report status to the upper
 *    gadget driver.
 * Ported from proc_qmu_rx() in the test driver.
 * Caller: qmu_interrupt, after getting the QMU done interrupt and TX is raised.
 */
void qmu_done_rx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	TGPD *gpd = Rx_gpd_last[ep_num];
	TGPD *gpd_current = (TGPD *) (os_readl(USB_QMU_RQCPR(ep_num)));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_out;
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* Try to give the request back to the gadget driver. */
	req = next_request(musb_ep);
	if (!req) {
		qmu_printk(K_ERR, "[RXD]" "%s Cannot get next request of %d, "
			   "but QMU has done.\n", __func__, ep_num);
		return;
	} else {
		request = &req->request;
	}

	/* Translate the PHY addr read from the QMU register to a VIR addr. */
	gpd_current = phys_to_virt((unsigned long)gpd_current);

	qmu_printk(K_DEBUG, "[RXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		   __func__, ep_num, gpd, gpd_current, Rx_gpd_end[ep_num]);

	/* gpd_current should at least point to the GPD after the previous last one. */
	if (gpd == gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]" "%s gpd(%p) == gpd_current(%p)\n",
			   __func__, gpd, gpd_current);

		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			   ep_num,
			   os_readl(USB_QMU_RQCSR(ep_num)),
			   os_readl(USB_QMU_RQSAR(ep_num)),
			   os_readl(USB_QMU_RQCPR(ep_num)),
			   os_readl(USB_QMU_RQLDPR(ep_num)));

		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "QCR0=%x, QCR1=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
			   os_readl(U3D_QCR0), os_readl(U3D_QCR1),
			   os_readl(U3D_QCR2), os_readl(U3D_QCR3),
			   os_readl(U3D_QGCSR));

		qmu_printk(K_INFO, "[RXD][ERROR]"
			   "HWO=%d, Next_GPD=%x, DataBufLen=%d, "
			   "DataBuf=%x, RecvLen=%d, Endpoint=%d\n",
			   (DEV_UINT32) TGPD_GET_FLAG(gpd),
			   (DEV_UINT32) TGPD_GET_NEXT(gpd),
			   (DEV_UINT32) TGPD_GET_DataBUF_LEN(gpd),
			   (DEV_UINT32) TGPD_GET_DATA(gpd),
			   (DEV_UINT32) TGPD_GET_BUF_LEN(gpd),
			   (DEV_UINT32) TGPD_GET_EPaddr(gpd));
		return;
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	/* invalidate GPD data in CPU */
	dma_sync_single_for_cpu(musb->controller, virt_to_phys(gpd),
				sizeof(TGPD), DMA_FROM_DEVICE);
	spin_lock_irqsave(&musb->lock, flags);

	if (!gpd || !gpd_current) {
		qmu_printk(K_ERR, "[RXD][ERROR]"
			   "%s EP%d, gpd=%p, gpd_current=%p, ishwo=%d, "
			   "rx_gpd_last=%p, RQCPR=0x%x\n",
			   __func__, ep_num, gpd, gpd_current,
			   ((gpd == NULL) ? 999 : TGPD_IS_FLAGS_HWO(gpd)),
			   Rx_gpd_last[ep_num],
			   os_readl(USB_QMU_RQCPR(ep_num)));
		return;
	}
static int dmabuf_begin_cpu_access(struct dma_buf *buf, size_t size,
				   size_t length,
				   enum dma_data_direction direction)
{
	struct dmabuf_file *priv = buf->priv;

	dma_sync_single_for_cpu(priv->dev, priv->phys, priv->size, direction);

	return 0;
}
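/*
 * A begin_cpu_access hook is normally paired with an end_cpu_access hook that
 * returns buffer ownership to the device. Below is a minimal sketch of such a
 * counterpart, assuming the same dmabuf_file layout and the same old-style
 * dma_buf_ops prototype used above; the name dmabuf_end_cpu_access and its
 * exact signature are assumptions, not taken from this source.
 */
static void dmabuf_end_cpu_access(struct dma_buf *buf, size_t size,
				  size_t length,
				  enum dma_data_direction direction)
{
	struct dmabuf_file *priv = buf->priv;

	/* CPU access is finished; hand the backing storage back to the device. */
	dma_sync_single_for_device(priv->dev, priv->phys, priv->size,
				   direction);
}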
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
			 int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					sg->length, dir);

	debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
}
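/*
 * The scatterlist helper above simply loops the single-buffer sync over each
 * entry. The device-direction variant follows the same pattern; a minimal
 * sketch under the same assumptions (including that a matching
 * debug_dma_sync_sg_for_device() hook is available in this environment).
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* Sync each entry individually, exactly as the CPU variant does. */
	for_each_sg(sglist, sg, nents, i)
		dma_sync_single_for_device(dev, sg_dma_address(sg),
					   sg->length, dir);

	debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
}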
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
				DMA_FROM_DEVICE);

	return 0;
}
static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}
/*
 * Writes a page. The user specifies the type, and this function handles the
 * configuration details.
 */
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
		      const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
			    INTR_STATUS__PROGRAM_FAIL;

	/*
	 * If it is a raw xfer, we want to disable ecc and send the spare area:
	 * !raw_xfer - enable ecc
	 *  raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
		       chip->oob_poi, mtd->oobsize);
	}

	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
			raw_xfer);
		denali->status = (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
				 NAND_STATUS_FAIL : PASS;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);

	return 0;
}
/**
 * lsdma_read - copy data from driver buffer to user buffer
 * @dma: DMA information structure
 * @data: user buffer
 * @length: size of data
 *
 * Returns a negative error code on failure and
 * the number of bytes copied on success.
 **/
static ssize_t lsdma_read(struct master_dma *dma, char __user *data,
			  size_t length)
{
	/* Amount of data remaining in the current buffer */
	const size_t max_length = dma->bufsize - dma->cpu_offset;
	size_t chunkvpage, chunkoffset, chunksize;
	size_t copied = 0;	/* Number of bytes copied */
	struct lsdma_desc *desc;

	/* Copy the rest of this buffer or the requested amount,
	 * whichever is less */
	if (length > max_length)
		length = max_length;

	while (length > 0) {
		chunkvpage = dma->cpu_buffer * dma->pointers_per_buf +
			     dma->cpu_offset / PAGE_SIZE;
		chunkoffset = dma->cpu_offset % PAGE_SIZE;
		desc = dma->desc[chunkvpage];
		chunksize = (desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE) -
			    chunkoffset;
		if (chunksize > length)
			chunksize = length;

		dma_sync_single_for_cpu(dma->dev,
					mdma_desc_to_dma(desc->dest_addr,
							 desc->dest_addr_h),
					(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
					DMA_FROM_DEVICE);
		if (copy_to_user(data + copied,
				 dma->vpage[chunkvpage] + chunkoffset,
				 chunksize)) {
			dma_sync_single_for_device(dma->dev,
						   mdma_desc_to_dma(desc->dest_addr,
								    desc->dest_addr_h),
						   (desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
						   DMA_FROM_DEVICE);
			return -EFAULT;
		}
		dma_sync_single_for_device(dma->dev,
					   mdma_desc_to_dma(desc->dest_addr,
							    desc->dest_addr_h),
					   (desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
					   DMA_FROM_DEVICE);

		dma->cpu_offset += chunksize;
		copied += chunksize;
		length -= chunksize;
	}

	if (copied == max_length) {
		dma->cpu_buffer = (dma->cpu_buffer + 1) % dma->buffers;
		dma->cpu_offset = 0;
	}

	return copied;
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips;
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
			    INTR_STATUS__ECC_ERR;
	bool check_erased_page = false;

	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d, investigate!!",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (check_erased_page) {
			if (!is_erased(buf, denali->mtd.writesize))
				denali->mtd.ecc_stats.failed++;
			if (!is_erased(buf, denali->mtd.oobsize))
				denali->mtd.ecc_stats.failed++;
		}
	}
	return max_bitflips;
}
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	int port = p->port;
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}
/**
 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
 * @urb: urb whose transfer_buffer/setup_packet will be synchronized
 */
void usb_buffer_dmasync(struct urb *urb)
{
	struct usb_bus *bus;
	struct device *controller;

	if (!urb
	    || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    || !urb->dev
	    || !(bus = urb->dev->bus)
	    || !(controller = bus->controller))
		return;

	if (controller->dma_mask) {
		dma_sync_single_for_cpu(controller,
			urb->transfer_dma, urb->transfer_buffer_length,
			usb_pipein(urb->pipe)
				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (usb_pipecontrol(urb->pipe))
			dma_sync_single_for_cpu(controller,
					urb->setup_dma,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	}
}
static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	usb_hcd_unmap_urb_for_dma(hcd, urb);
	free_align_buffer(urb);

	if (urb->transfer_dma) {
		enum dma_data_direction dir;

		dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		if (dir == DMA_FROM_DEVICE)
			dma_sync_single_for_cpu(hcd->self.controller,
						urb->transfer_dma,
						urb->transfer_buffer_length,
						DMA_FROM_DEVICE);
	}
}
/**
 * This function completes a request.  It calls the request callback.
 */
void request_done(dwc_otg_pcd_ep_t * _ep, dwc_otg_pcd_request_t * _req,
		  int _status)
{
	unsigned stopped = _ep->stopped;

	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);

	if (_req->mapped) {
		dma_unmap_single(_ep->pcd->gadget.dev.parent,
				 _req->req.dma, _req->req.length,
				 _ep->dwc_ep.is_in
					? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		_req->req.dma = DMA_ADDR_INVALID;
		_req->mapped = 0;
	} else {
		dma_sync_single_for_cpu(_ep->pcd->gadget.dev.parent,
					_req->req.dma, _req->req.length,
					_ep->dwc_ep.is_in
						? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	list_del_init(&_req->queue);

	if (_req->req.status == -EINPROGRESS)
		_req->req.status = _status;
	else
		_status = _req->req.status;

	/* don't modify queue heads during completion callback */
	_ep->stopped = 1;
	SPIN_UNLOCK(&_ep->pcd->lock);
	_req->req.complete(&_ep->ep, &_req->req);
	SPIN_LOCK(&_ep->pcd->lock);

	if (_ep->pcd->request_pending > 0)
		--_ep->pcd->request_pending;

	_ep->stopped = stopped;

#ifdef CONFIG_405EZ
	/*
	 * Added-sr: 2007-07-26
	 *
	 * Finally, when the current request is done, mark this endpoint
	 * as not active, so that new requests can be processed.
	 */
	_ep->dwc_ep.active = 0;
#endif
}
static void omap_8250_dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	bool en_thri = false;
	struct omap8250_priv *priv = p->port.private_data;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (priv->delayed_restore) {
		priv->delayed_restore = 0;
		omap8250_restore_regs(p);
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) {
		int ret;

		ret = omap_8250_tx_dma(p);
		if (ret)
			en_thri = true;
	} else if (p->capabilities & UART_CAP_RPM) {
		en_thri = true;
	}

	if (en_thri) {
		dma->tx_err = 1;
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
		       const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
			    INTR_STATUS__PROGRAM_FAIL;

	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		memcpy(denali->buf.buf + mtd->writesize,
		       chip->oob_poi, mtd->oobsize);
	}

	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
			raw_xfer);
		denali->status = (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
				 NAND_STATUS_FAIL : PASS;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
/*
 * It is expected that the callers take the UART lock when this API is called.
 *
 * There are 2 contexts when this function is called:
 *
 * 1. DMA ISR - The DMA ISR triggers the threshold-complete callback, which
 *    calls the dequeue API, which in turn calls this callback. The UART lock
 *    is taken during the call to the threshold callback.
 *
 * 2. UART ISR - The UART ISR calls the dequeue API, which in turn calls this
 *    API. In this case, the UART ISR takes the UART lock.
 */
static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
{
	struct tegra_uart_port *t = req->dev;
	struct uart_port *u = &t->uport;
	struct tty_struct *tty = u->state->port.tty;
	int copied;

	/* If we are here, DMA is stopped */
	dev_dbg(t->uport.dev, "%s: %d %d\n", __func__,
		req->bytes_transferred, req->status);

	if (req->bytes_transferred) {
		t->uport.icount.rx += req->bytes_transferred;
		dma_sync_single_for_cpu(t->uport.dev, req->dest_addr,
					req->size, DMA_FROM_DEVICE);
		copied = tty_insert_flip_string(tty,
			((unsigned char *)(req->virt_addr)),
			req->bytes_transferred);
		if (copied != req->bytes_transferred) {
			WARN_ON(1);
			dev_err(t->uport.dev, "Not able to copy uart data "
				"to tty layer Req %d and copied %d\n",
				req->bytes_transferred, copied);
		}
		dma_sync_single_for_device(t->uport.dev, req->dest_addr,
					   req->size, DMA_TO_DEVICE);
	}

	do_handle_rx_pio(t);

	/* Push the read data later in caller place. */
	if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
		return;

	printk(KERN_ERR "%s: tegra_uart_%d: DMA_REQ: buf_stat(%d),stat(%d)\n",
	       __func__, req->instance, req->buffer_status, req->status);

	/* Not an out-of-sync error from dma_isr/dbl_dma */
	if (req->status != -TEGRA_DMA_REQ_ERROR_STOPPED)
		spin_unlock(&u->lock);

	tty_flip_buffer_push(u->state->port.tty);

	/* Not an out-of-sync error from dma_isr/dbl_dma */
	if (req->status != -TEGRA_DMA_REQ_ERROR_STOPPED)
		spin_lock(&u->lock);
}
/**
 * lsdma_rxdqbuf - Dequeue a read buffer
 * @dma: DMA information structure
 * @bufnum: buffer number
 *
 * Do everything which normally precedes a copy from a driver buffer
 * to a user buffer.
 **/
static ssize_t lsdma_rxdqbuf(struct master_dma *dma, size_t bufnum)
{
	unsigned int i;
	struct lsdma_desc *desc;

	if (bufnum != dma->cpu_buffer)
		return -EINVAL;

	for (i = 0; i < dma->pointers_per_buf; i++) {
		desc = dma->desc[dma->cpu_buffer * dma->pointers_per_buf + i];
		dma_sync_single_for_cpu(dma->dev,
					mdma_desc_to_dma(desc->dest_addr,
							 desc->dest_addr_h),
					(desc->csr & LSDMA_DESC_CSR_TOTALXFERSIZE),
					DMA_FROM_DEVICE);
	}
	return dma->bufsize;
}
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_struct *tty = p->port.state->port.tty;
	struct dma_tx_state state;

	dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	dmaengine_terminate_all(dma->rxchan);

	tty_insert_flip_string(tty, dma->rx_buf, dma->rx_size - state.residue);
	p->port.icount.rx += dma->rx_size - state.residue;

	tty_flip_buffer_push(tty);
}