/**
 * cppi41_init_teardown_queue - prime the teardown descriptor queue
 * @dma_num: DMA block number
 *
 * Push every pre-allocated teardown descriptor onto the teardown free
 * queue so the DMA controller can pop one when a channel is torn down.
 */
void cppi41_init_teardown_queue(int dma_num)
{
	dma_addr_t td_addr;
	struct cppi41_teardown_desc *curr_td;
	u32 num_desc = dma_teardown[dma_num].num_desc;
	int i;

	curr_td = dma_teardown[dma_num].virt_addr;
	td_addr = dma_teardown[dma_num].phys_addr;

	for (i = 0; i < num_desc; i++) {
		cppi41_queue_push(&dma_teardown[dma_num].queue_obj, td_addr,
				  sizeof(*curr_td), 0);
		td_addr += sizeof(*curr_td);
	}
}
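/*
 * Illustrative sketch only, not part of the driver: one way the teardown
 * pool used above could be allocated before cppi41_init_teardown_queue()
 * runs.  The helper name and the device argument are assumptions made for
 * the example; only the dma_teardown[] fields referenced above are real.
 * dma_alloc_coherent() returns matching virtual and DMA views of one
 * contiguous block, which is why the loop above can derive each
 * descriptor's DMA address by a simple sizeof() offset.
 */
static int cppi41_alloc_teardown_pool(struct device *dev, int dma_num,
				      u32 num_desc)
{
	size_t size = num_desc * sizeof(struct cppi41_teardown_desc);

	dma_teardown[dma_num].num_desc = num_desc;
	dma_teardown[dma_num].virt_addr =
		dma_alloc_coherent(dev, size,
				   &dma_teardown[dma_num].phys_addr,
				   GFP_KERNEL);

	return dma_teardown[dma_num].virt_addr ? 0 : -ENOMEM;
}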
/**
 * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
 * @tx_ch: Tx channel
 *
 * Context: controller IRQ-locked
 */
static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	u32 length = tx_ch->length - tx_ch->curr_offset;
	u32 pkt_size = tx_ch->pkt_size;
	unsigned num_pds, n;
	struct usb_cppi41_info *cppi_info = cppi->cppi_info;
	u16 q_mgr = cppi_info->q_mgr;
	u16 tx_comp_q = cppi_info->tx_comp_q[tx_ch->ch_num];
	u8 en_bd_intr = cppi->en_bd_intr;

	/*
	 * Tx can use the generic RNDIS mode where we can probably fit this
	 * transfer in one PD and one IRQ.  The only time we would NOT want
	 * to use it is when the hardware constraints prevent it...
	 */
	if ((pkt_size & 0x3f) == 0) {
		num_pds = length ? 1 : 0;
		cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
		pkt_size = length;
	} else {
		num_pds = (length + pkt_size - 1) / pkt_size;
		cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
	}

	/*
	 * If the transmit buffer length is 0 or a multiple of the endpoint
	 * size, send a zero-length packet to terminate the transfer.
	 */
	if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
		num_pds++;

	DBG(4, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
	    tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);

	for (n = 0; n < num_pds; n++) {
		struct cppi41_host_pkt_desc *hw_desc;

		/* Get a Tx host packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			DBG(1, "No Tx PDs\n");
			break;
		}

		if (length < pkt_size)
			pkt_size = length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
				      CPPI41_DESC_TYPE_SHIFT) | pkt_size;
		hw_desc->tag_info = tx_ch->tag_info;
		hw_desc->pkt_info = cppi->pkt_info;
		hw_desc->pkt_info |= ((q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
				      (tx_comp_q << CPPI41_RETURN_QNUM_SHIFT));
		hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
		hw_desc->buf_len = pkt_size;
		hw_desc->next_desc_ptr = 0;
		hw_desc->orig_buf_len = pkt_size;

		curr_pd->ch_num = tx_ch->ch_num;
		curr_pd->ep_num = tx_ch->end_pt->epnum;

		tx_ch->curr_offset += pkt_size;
		length -= pkt_size;

		if (pkt_size == 0)
			tx_ch->zlp_queued = 1;

		if (en_bd_intr)
			hw_desc->orig_buf_len |= CPPI41_PKT_INTR_FLAG;

		DBG(5, "TX PD %p: buf %08x, len %08x, pkt info %08x\n",
		    curr_pd, hw_desc->buf_ptr, hw_desc->buf_len,
		    hw_desc->pkt_info);

		cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, pkt_size);
	}

	return n;
}
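/*
 * Illustrative sketch only, not part of the driver: the PD-count rule from
 * cppi41_next_tx_segment() factored out so the two modes are easy to
 * compare.  A maxpacket size that is a multiple of 64 lets generic RNDIS
 * mode cover the whole remaining transfer with a single PD; otherwise
 * transparent mode needs one PD per maxpacket-sized chunk, plus one extra
 * PD when a terminating zero-length packet is required.
 */
static unsigned cppi41_tx_pd_count(u32 length, u32 pkt_size, u8 transfer_mode)
{
	unsigned num_pds;

	if ((pkt_size & 0x3f) == 0) {
		/* Generic RNDIS mode: one PD spans the whole transfer */
		num_pds = length ? 1 : 0;
		pkt_size = length;
	} else {
		/* Transparent mode: one PD per maxpacket-sized chunk */
		num_pds = (length + pkt_size - 1) / pkt_size;
	}

	/*
	 * E.g. length 100, pkt_size 8 -> 13 PDs; length 16, pkt_size 8 with
	 * transfer_mode set -> 2 data PDs plus 1 ZLP PD.
	 */
	if (!length || (transfer_mode && length % pkt_size == 0))
		num_pds++;

	return num_pds;
}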