/*
 * Free the per-queued-command LLI tables and the port private data when
 * libata tears the port down.
 */
static void sata_dwc_port_stop(struct ata_port *ap)
{
    int i;
    struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
    struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

    dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

    if (hsdevp && hsdev) {

        for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
            dma_free_coherent(ap->host->dev,
                              SATA_DWC_DMAC_LLI_TBL_SZ,
                              hsdevp->llit[i], hsdevp->llit_dma[i]);
        }

        kfree(hsdevp);
    }
    ap->private_data = NULL;
}
/*
 * Called when the DMA transfer for the active command finishes: complete the
 * qc and clear the per-tag DMA-pending state.
 */
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
    struct ata_queued_cmd *qc;
    struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
    struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
    u8 tag;

    tag = ap->link.active_tag;
    qc = ata_qc_from_tag(ap, tag);
    if (!qc) {
        dev_err(ap->dev, "failed to get qc");
        return;
    }

#ifdef DEBUG_NCQ
    if (tag > 0) {
        dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
                 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
                 get_dma_dir_descript(qc->dma_dir),
                 get_prot_descript(qc->tf.protocol),
                 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
    }
#endif

    if (ata_is_dma(qc->tf.protocol)) {
        if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
            dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
                    "pending dmacr: 0x%08x\n", __func__,
                    in_le32(&(hsdev->sata_dwc_regs->dmacr)));
        }

        hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
        sata_dwc_qc_complete(ap, qc, check_status);
        ap->link.active_tag = ATA_TAG_POISON;
    } else {
        sata_dwc_qc_complete(ap, qc, check_status);
    }
}
/*
 * Set up the AHB DMA transfer for a queued command: build the LLI table for
 * this tag and remember the DMA channel that will service it.
 */
static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
    struct scatterlist *sg = qc->sg;
    struct ata_port *ap = qc->ap;
    int dma_chan;
    struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
    struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

    dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
            __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
            qc->n_elem);

    dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
                                  hsdevp->llit_dma[tag],
                                  (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
                                  qc->dma_dir);
    if (dma_chan < 0) {
        dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
                __func__, dma_chan);
        return;
    }
    hsdevp->dma_chan[tag] = dma_chan;
}
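
/*
 * Illustrative sketch only (not part of the excerpt above): a libata
 * ->qc_issue hook would typically pick the command's tag for NCQ commands
 * and fall back to tag 0 otherwise before building the LLI table via
 * sata_dwc_qc_prep_by_tag(). The function name and the exact issue path used
 * by this driver are assumptions; only the tag-selection pattern is shown.
 */
static unsigned int sata_dwc_qc_issue_sketch(struct ata_queued_cmd *qc)
{
    u8 tag = qc->tag;

    if (!ata_is_ncq(qc->tf.protocol))
        tag = 0;    /* non-NCQ commands use LLI table 0 */

    sata_dwc_qc_prep_by_tag(qc, tag);
    return ata_sff_qc_issue(qc);    /* hand the taskfile to the generic SFF path */
}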
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
    int err = 0;
    struct sata_dwc_device *hsdev;
    struct sata_dwc_device_port *hsdevp = NULL;
    struct device *pdev;
    int i;

    hsdev = HSDEV_FROM_AP(ap);

    dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

    hsdev->host = ap->host;
    pdev = ap->host->dev;
    if (!pdev) {
        dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
        err = -ENODEV;
        goto CLEANUP;
    }

    /* Allocate Port Struct */
    hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
    if (!hsdevp) {
        dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
        err = -ENOMEM;
        goto CLEANUP;
    }
    hsdevp->hsdev = hsdev;

    for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
        hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

    ap->bmdma_prd = 0;      /* set these so libata doesn't use them */
    ap->bmdma_prd_dma = 0;

    /*
     * DMA - Assign scatter gather LLI table. We can't use the libata
     * version since its PRD is IDE PCI specific.
     */
    for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
        hsdevp->llit[i] = dma_alloc_coherent(pdev,
                                             SATA_DWC_DMAC_LLI_TBL_SZ,
                                             &(hsdevp->llit_dma[i]),
                                             GFP_ATOMIC);
        if (!hsdevp->llit[i]) {
            dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
                    __func__);
            err = -ENOMEM;
            goto CLEANUP_ALLOC;
        }
    }

    if (ap->port_no == 0)  {
        dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
                __func__);
        out_le32(&hsdev->sata_dwc_regs->dmacr,
                 SATA_DWC_DMACR_TXRXCH_CLEAR);

        dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
                __func__);
        out_le32(&hsdev->sata_dwc_regs->dbtsr,
                 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
                  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
    }

    /* Clear any error bits before libata starts issuing commands */
    clear_serror();
    ap->private_data = hsdevp;
    dev_dbg(ap->dev, "%s: done\n", __func__);
    return 0;

CLEANUP_ALLOC:
    kfree(hsdevp);
CLEANUP:
    dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
    return err;
}
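
/*
 * Illustrative sketch only (assumed wiring, not shown in this excerpt): the
 * port_start/port_stop hooks above are normally registered through a libata
 * struct ata_port_operations table. The table name here is made up and the
 * rest of the driver's operations are omitted; .inherits, .port_start and
 * .port_stop are the standard libata field names.
 */
static struct ata_port_operations sata_dwc_ops_sketch = {
    .inherits   = &ata_sff_port_ops,    /* reuse the generic SFF hooks */
    .port_start = sata_dwc_port_start,  /* allocate per-tag LLI tables */
    .port_stop  = sata_dwc_port_stop,   /* free them at teardown */
};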