/* Example #1 */
/*
 * cppi41_dma_block_init - initialize one CPPI 4.1 DMA block
 * @dma_num:   index into cppi41_dma_block[] of the block to set up
 * @q_mgr:     queue manager from which to allocate the teardown
 *             free-descriptor queue
 * @num_order: log2 of the number of teardown descriptors to pre-allocate
 *             (num_desc = 1 << num_order)
 * @sched_tbl: DMA scheduler table contents; written one 32-bit word per
 *             register, (tbl_size + 3) / 4 words in total
 * @tbl_size:  number of scheduler table entries described by @sched_tbl
 *             (must be non-zero)
 *
 * Allocates a free-descriptor queue for teardown descriptors, points the
 * DMA block's hardware at it, pre-allocates the teardown descriptors in
 * DMA-coherent memory, registers that memory with the queue manager, pushes
 * the descriptors onto the free queue, and finally programs and enables the
 * DMA scheduler.
 *
 * Returns 0 on success or a negative errno; on failure, everything acquired
 * up to the failing step is released via the goto cleanup chain.
 */
int cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
                          u32 *sched_tbl, u8 tbl_size)
{
    const struct cppi41_dma_block *dma_block;
    unsigned num_desc, num_reg;
    void *ptr;
    int error, i;
    u16 q_num;
    u32 val;

    /* Reject out-of-range block/manager indices and an empty/missing table. */
    if (dma_num >= cppi41_num_dma_block ||
            q_mgr >= cppi41_num_queue_mgr ||
            !tbl_size || sched_tbl == NULL)
        return -EINVAL;

    /* Grab an unassigned free-descriptor queue to hold teardown descriptors. */
    error = cppi41_queue_alloc(CPPI41_FREE_DESC_QUEUE |
                               CPPI41_UNASSIGNED_QUEUE, q_mgr, &q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptor queue.\n", __func__);
        return error;
    }
    /*
     * NOTE(review): the message below hardcodes "queue manager 0" although
     * the queue was allocated from @q_mgr — misleading if q_mgr != 0.
     */
    DBG("Teardown descriptor queue %d in queue manager 0 "
        "allocated\n", q_num);

    /*
     * Tell the hardware about the Teardown descriptor
     * queue manager and queue number.
     */
    dma_block = &cppi41_dma_block[dma_num];
    cppi_writel((q_mgr << DMA_TD_DESC_QMGR_SHIFT) |
                (q_num << DMA_TD_DESC_QNUM_SHIFT),
                dma_block->global_ctrl_base +
                DMA_TEARDOWN_FREE_DESC_CTRL_REG);
    /* Read back so the DBG shows what the hardware actually latched. */
    DBG("Teardown free descriptor control @ %p, value: %x\n",
        dma_block->global_ctrl_base + DMA_TEARDOWN_FREE_DESC_CTRL_REG,
        cppi_readl(dma_block->global_ctrl_base +
                   DMA_TEARDOWN_FREE_DESC_CTRL_REG));

    num_desc = 1 << num_order;
    dma_teardown[dma_num].rgn_size = num_desc *
                                     sizeof(struct cppi41_teardown_desc);

    /* Pre-allocate teardown descriptors. */
    ptr = dma_alloc_coherent(NULL, dma_teardown[dma_num].rgn_size,
                             &dma_teardown[dma_num].phys_addr,
                             GFP_KERNEL | GFP_DMA);
    if (ptr == NULL) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptors.\n", __func__);
        error = -ENOMEM;
        goto free_queue;
    }
    dma_teardown[dma_num].virt_addr = ptr;

    /*
     * Register the descriptor memory with the queue manager.
     * NOTE(review): the magic '5' is presumably the log2 descriptor slot
     * size (2^5 = 32 bytes) — confirm it matches
     * sizeof(struct cppi41_teardown_desc) and consider a named constant.
     */
    error = cppi41_mem_rgn_alloc(q_mgr, dma_teardown[dma_num].phys_addr, 5,
                                 num_order, &dma_teardown[dma_num].mem_rgn);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate queue manager "
               "memory region for teardown descriptors.\n", __func__);
        goto free_mem;
    }

    /*
     * NOTE(review): queue manager 0 is hardcoded here, yet the queue was
     * allocated from @q_mgr above — verify this should not be q_mgr.
     */
    error = cppi41_queue_init(&dma_teardown[dma_num].queue_obj, 0, q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to initialize teardown "
               "free descriptor queue.\n", __func__);
        goto free_rgn;
    }

    /* Record the teardown bookkeeping for this DMA block. */
    dma_teardown[dma_num].q_num = q_num;
    dma_teardown[dma_num].q_mgr = q_mgr;
    dma_teardown[dma_num].num_desc = num_desc;
    /*
     * Push all teardown descriptors to the free teardown queue
     * for the CPPI 4.1 system.
     */
    cppi41_init_teardown_queue(dma_num);

    /* Initialize the DMA scheduler: 4 table entries per 32-bit register. */
    num_reg = (tbl_size + 3) / 4;
    for (i = 0; i < num_reg; i++) {
        val = sched_tbl[i];
        cppi_writel(val, dma_block->sched_table_base +
                    DMA_SCHED_TABLE_WORD_REG(i));
        DBG("DMA scheduler table @ %p, value written: %x\n",
            dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
            val);
    }

    /* Program the last-entry index and enable the scheduler. */
    cppi_writel((tbl_size - 1) << DMA_SCHED_LAST_ENTRY_SHIFT |
                DMA_SCHED_ENABLE_MASK,
                dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
    DBG("DMA scheduler control @ %p, value: %x\n",
        dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG,
        cppi_readl(dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG));

    return 0;

/* Error unwind: release resources in reverse order of acquisition. */
free_rgn:
    cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
free_mem:
    dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
                      dma_teardown[dma_num].virt_addr,
                      dma_teardown[dma_num].phys_addr);
free_queue:
    cppi41_queue_free(q_mgr, q_num);
    return error;
}
/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels: it allocates the
 * shared packet-descriptor pool, registers it with the queue manager, sets
 * up the teardown completion queue, and programs the channel bookkeeping
 * and USB mode registers.
 *
 * Returns 1 on success and 0 on failure (note the non-standard convention:
 * 0 signals failure here, including the early return on allocation failure).
 */
static int __init cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;
	struct usb_cppi41_info *cppi_info;

	cppi = container_of(controller, struct cppi41, controller);
	cppi_info = cppi->cppi_info;

	cppi->automode_reg_offs = USB_AUTOREQ_REG;
	cppi->teardown_reg_offs = USB_TEARDOWN_REG;

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent()  will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->bd_size = USB_CPPI41_MAX_PD * sizeof(struct usb_pkt_desc);
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  cppi->bd_size,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		/* 0 == failure per this function's return convention. */
		return 0;
	}

	/* Register the descriptor pool memory with the queue manager. */
	if (cppi41_mem_rgn_alloc(cppi_info->q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/*
	 * Allocate the teardown completion queue.
	 * NOTE(review): queue manager 0 is hardcoded here (and in the free
	 * path below) while the memory region above uses cppi_info->q_mgr —
	 * confirm the teardown queue must live in manager 0.
	 */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		/* Each PD remembers its own DMA address for later queuing. */
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		/* Advance by the aligned slot size, not sizeof(*curr_pd). */
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[cppi_info->dma_block].tx_ch_info
			  + cppi_info->ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT);

	/* Do necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, cppi->automode_reg_offs, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

/* Error unwind: release resources in reverse order of acquisition. */
 free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
	if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

 free_pds:
	dma_free_coherent(cppi->musb->controller,
			  cppi->bd_size,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}