Code Example #1
/**
 * cppi41_channel_alloc - allocate a CPPI channel for DMA.
 * @controller: the controller
 * @ep:		the endpoint
 * @is_tx:	1 for Tx channel, 0 for Rx channel
 *
 * With CPPI, channels are bound to each transfer direction of a non-control
 * endpoint, so allocating (and deallocating) is mostly a way to notice bad
 * housekeeping on the software side.  We assume the IRQs are always active.
 */
static struct dma_channel *cppi41_channel_alloc(struct dma_controller
						*controller,
						struct musb_hw_ep *ep, u8 is_tx)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	u32 ch_num, ep_num = ep->epnum;
	struct usb_cppi41_info *cppi_info;

	cppi = container_of(controller, struct cppi41, controller);
	cppi_info = cppi->cppi_info;

	/* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
	ch_num = ep_num - 1;

	if (ep_num > USB_CPPI41_NUM_CH) {
		DBG(1, "No %cx DMA channel for EP%d\n",
		    is_tx ? 'T' : 'R', ep_num);
		return NULL;
	}

	cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;

	/* As of now, just return the corresponding CPPI 4.1 channel handle */
	if (is_tx) {
		/* Initialize the CPPI 4.1 Tx DMA channel */
		if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
				      cppi_info->dma_block,
				      cppi_info->ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_tx_ch_init failed for "
			    "channel %d\n", ch_num);
			return NULL;
		}
		/*
		 * Teardown descriptors will be pushed to the dedicated
		 * completion queue.
		 */
		cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
					    0, cppi->teardownQNum);
	} else {
		struct cppi41_rx_ch_cfg rx_cfg;
		u8 q_mgr = cppi_info->q_mgr;
		int i;

		/* Initialize the CPPI 4.1 Rx DMA channel */
		if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
				      cppi_info->dma_block,
				      cppi_info->ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_rx_ch_init failed\n");
			return NULL;
		}

		if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
				       CPPI41_UNASSIGNED_QUEUE,
				       q_mgr, &cppi_ch->src_queue.q_num)) {
			DBG(1, "ERROR: cppi41_queue_alloc failed for "
			    "free descriptor/buffer queue\n");
			return NULL;
		}
		DBG(4, "Allocated free descriptor/buffer queue %d in "
		    "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);

		rx_cfg.default_desc_type = cppi41_rx_host_desc;
		rx_cfg.sop_offset = 0;
		rx_cfg.retry_starved = 1;
		rx_cfg.rx_max_buf_cnt = 0;
		rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
		rx_cfg.rx_queue.q_num = cppi_info->rx_comp_q[ch_num];
		for (i = 0; i < 4; i++)
			rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
		cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
	}

	/* Initialize the CPPI 4.1 DMA source queue */
	if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
			       cppi_ch->src_queue.q_num)) {
		DBG(1, "ERROR: cppi41_queue_init failed for %s queue",
		    is_tx ? "Tx" : "Rx free descriptor/buffer");
		if (is_tx == 0 &&
		    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
				      cppi_ch->src_queue.q_num))
			DBG(1, "ERROR: failed to free Rx descriptor/buffer "
			    "queue\n");
		return NULL;
	}

	/* Enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	if (cppi_ch->end_pt)
		DBG(1, "Re-allocating DMA %cx channel %d (%p)\n",
		    is_tx ? 'T' : 'R', ch_num, cppi_ch);

	cppi_ch->end_pt = ep;
	cppi_ch->ch_num = ch_num;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = is_tx ?
				CPPI41_TXDMA_MAXLEN : CPPI41_RXDMA_MAXLEN;

	DBG(4, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
	    ch_num, ep_num);

	return &cppi_ch->channel;
}
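
A minimal usage sketch for the allocator above. In musb this function is normally reached through the controller's channel_alloc hook rather than called directly, so the helper below is hypothetical and only illustrates the calling convention: is_tx selects the direction, and NULL signals failure.

/*
 * Hypothetical helper (not in the driver): request the Tx channel bound
 * to a hardware endpoint.  Assumes `controller` and `hw_ep` were set up
 * by the musb core beforehand.
 */
static struct dma_channel *request_tx_channel(struct dma_controller *controller,
					      struct musb_hw_ep *hw_ep)
{
	/* is_tx = 1: ask for the Tx side of this endpoint's channel pair */
	struct dma_channel *ch = cppi41_channel_alloc(controller, hw_ep, 1);

	if (ch == NULL)
		return NULL;	/* EP out of range or CPPI channel init failed */

	/* A freshly allocated channel is idle and ready to be programmed */
	WARN_ON(ch->status != MUSB_DMA_STATUS_FREE);
	return ch;
}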
Code Example #2
int cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
                          u32 *sched_tbl, u8 tbl_size)
{
    const struct cppi41_dma_block *dma_block;
    unsigned num_desc, num_reg;
    void *ptr;
    int error, i;
    u16 q_num;
    u32 val;

    if (dma_num >= cppi41_num_dma_block ||
            q_mgr >= cppi41_num_queue_mgr ||
            !tbl_size || sched_tbl == NULL)
        return -EINVAL;

    error = cppi41_queue_alloc(CPPI41_FREE_DESC_QUEUE |
                               CPPI41_UNASSIGNED_QUEUE, q_mgr, &q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptor queue.\n", __func__);
        return error;
    }
    DBG("Teardown descriptor queue %d in queue manager 0 "
        "allocated\n", q_num);

    /*
     * Tell the hardware about the Teardown descriptor
     * queue manager and queue number.
     */
    dma_block = &cppi41_dma_block[dma_num];
    cppi_writel((q_mgr << DMA_TD_DESC_QMGR_SHIFT) |
                (q_num << DMA_TD_DESC_QNUM_SHIFT),
                dma_block->global_ctrl_base +
                DMA_TEARDOWN_FREE_DESC_CTRL_REG);
    DBG("Teardown free descriptor control @ %p, value: %x\n",
        dma_block->global_ctrl_base + DMA_TEARDOWN_FREE_DESC_CTRL_REG,
        cppi_readl(dma_block->global_ctrl_base +
                   DMA_TEARDOWN_FREE_DESC_CTRL_REG));

    num_desc = 1 << num_order;
    dma_teardown[dma_num].rgn_size = num_desc *
                                     sizeof(struct cppi41_teardown_desc);

    /* Pre-allocate teardown descriptors. */
    ptr = dma_alloc_coherent(NULL, dma_teardown[dma_num].rgn_size,
                             &dma_teardown[dma_num].phys_addr,
                             GFP_KERNEL | GFP_DMA);
    if (ptr == NULL) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptors.\n", __func__);
        error = -ENOMEM;
        goto free_queue;
    }
    dma_teardown[dma_num].virt_addr = ptr;

    /* Size order 5: CPPI 4.1 teardown descriptors are 2^5 = 32 bytes each */
    error = cppi41_mem_rgn_alloc(q_mgr, dma_teardown[dma_num].phys_addr, 5,
                                 num_order, &dma_teardown[dma_num].mem_rgn);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate queue manager "
               "memory region for teardown descriptors.\n", __func__);
        goto free_mem;
    }

    error = cppi41_queue_init(&dma_teardown[dma_num].queue_obj, q_mgr,
                              q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to initialize teardown "
               "free descriptor queue.\n", __func__);
        goto free_rgn;
    }

    dma_teardown[dma_num].q_num = q_num;
    dma_teardown[dma_num].q_mgr = q_mgr;
    dma_teardown[dma_num].num_desc = num_desc;
    /*
     * Push all teardown descriptors to the free teardown queue
     * for the CPPI 4.1 system.
     */
    cppi41_init_teardown_queue(dma_num);

    /* Initialize the DMA scheduler. */
    num_reg = (tbl_size + 3) / 4;    /* four 8-bit entries per 32-bit word */
    for (i = 0; i < num_reg; i++) {
        val = sched_tbl[i];
        cppi_writel(val, dma_block->sched_table_base +
                    DMA_SCHED_TABLE_WORD_REG(i));
        DBG("DMA scheduler table @ %p, value written: %x\n",
            dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
            val);
    }

    cppi_writel((tbl_size - 1) << DMA_SCHED_LAST_ENTRY_SHIFT |
                DMA_SCHED_ENABLE_MASK,
                dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
    DBG("DMA scheduler control @ %p, value: %x\n",
        dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG,
        cppi_readl(dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG));

    return 0;

free_rgn:
    cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
free_mem:
    dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
                      dma_teardown[dma_num].virt_addr,
                      dma_teardown[dma_num].phys_addr);
free_queue:
    cppi41_queue_free(q_mgr, q_num);
    return error;
}
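
A hedged setup sketch for the block initializer above: it builds a round-robin scheduler table that alternates Tx and Rx entries and passes it in. The one-byte-per-entry packing (four entries per 32-bit word) mirrors the write loop over DMA_SCHED_TABLE_WORD_REG; the 0x80 Rx flag and the block/queue-manager numbers are assumptions drawn from common CPPI 4.1 usage, not from this file.

/*
 * Hypothetical caller: schedule USB_CPPI41_NUM_CH Tx/Rx channel pairs
 * round-robin, then bring up DMA block 0 on queue manager 0 with
 * 2^5 = 32 pre-allocated teardown descriptors.
 */
static int cppi41_dma_block_setup_example(void)
{
    u32 sched_tbl[(2 * USB_CPPI41_NUM_CH + 3) / 4] = { 0 };
    u8 num_entries = 0;
    u8 ch;

    for (ch = 0; ch < USB_CPPI41_NUM_CH; ch++) {
        /* Tx entry: plain channel number */
        sched_tbl[num_entries / 4] |= (u32)ch << (8 * (num_entries % 4));
        num_entries++;
        /* Rx entry: channel number with the (assumed) Rx flag in bit 7 */
        sched_tbl[num_entries / 4] |=
            (u32)(ch | 0x80) << (8 * (num_entries % 4));
        num_entries++;
    }

    return cppi41_dma_block_init(0, 0, 5, sched_tbl, num_entries);
}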
Code Example #3
/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __init cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;
	struct usb_cppi41_info *cppi_info;

	cppi = container_of(controller, struct cppi41, controller);
	cppi_info = cppi->cppi_info;

	cppi->automode_reg_offs = USB_AUTOREQ_REG;
	cppi->teardown_reg_offs = USB_TEARDOWN_REG;

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */
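	/*
	 * A hedged compile-time version of the check described above
	 * (not in the original code; assumes both values are constant
	 * expressions visible at compile time):
	 */
	BUILD_BUG_ON(USB_CPPI41_MAX_PD % 32);
	BUILD_BUG_ON(sizeof(struct usb_pkt_desc) % 32);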

	/*
	 * Allocate a free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent() will return a page-aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->bd_size = USB_CPPI41_MAX_PD * sizeof(struct usb_pkt_desc);
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  cppi->bd_size,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}

	if (cppi41_mem_rgn_alloc(cppi_info->q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[cppi_info->dma_block].tx_ch_info
			  + cppi_info->ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT);

	/* Do the necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, cppi->automode_reg_offs, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

 free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
	if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

 free_pds:
	dma_free_coherent(cppi->musb->controller,
			  cppi->bd_size,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}
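
For context, a hedged sketch of how a start hook like this is typically wired up: the glue code allocates the cppi41 state, fills in the dma_controller vtable, and the musb core then invokes start once while bringing the controller up. The field names below follow the musb dma_controller layout of the same era and are assumptions, not code from this driver.

/*
 * Hypothetical glue (not in the driver): create the controller object
 * and register cppi41_controller_start as its start hook.
 */
static struct dma_controller *cppi41_controller_create_sketch(struct musb *musb)
{
	struct cppi41 *cppi;

	cppi = kzalloc(sizeof(*cppi), GFP_KERNEL);
	if (cppi == NULL)
		return NULL;

	cppi->musb = musb;
	cppi->controller.start = cppi41_controller_start;
	/*
	 * The real driver would also fill in .stop, .channel_alloc,
	 * .channel_release and .channel_program here.
	 */
	return &cppi->controller;
}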