void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	return ret_nocache;
}
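A minimal caller-side sketch (an assumption, not part of the example above) of how a driver consumes a coherent allocator like this one through the generic DMA API; struct my_ring and the two helpers are placeholder names.

#include <linux/dma-mapping.h>

struct my_ring {
	void		*cpu_addr;	/* kernel virtual address, uncached */
	dma_addr_t	dma_addr;	/* bus address handed to the device */
	size_t		size;
};

static int my_ring_alloc(struct device *dev, struct my_ring *ring, size_t size)
{
	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;
	ring->size = size;
	return 0;
}

static void my_ring_free(struct device *dev, struct my_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->cpu_addr, ring->dma_addr);
}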
Example #2
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return addr;
}
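A hedged sketch (not from the listing) of the streaming-DMA call path that a ->map_page hook like nommu_map_page backs: the driver maps a buffer, programs the device with the returned bus address, then unmaps.

#include <linux/dma-mapping.h>

static int my_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... write 'dma' and 'len' into the device's TX registers here ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}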
Example #3
void fb_present(struct fb_t * fb, struct render_t * render)
{
	struct fb_rk3288_pdata_t * pdat = (struct fb_rk3288_pdata_t *)fb->priv;

	if(render && render->pixels)
	{
		pdat->index = (pdat->index + 1) & 0x1;
		memcpy(pdat->vram[pdat->index], render->pixels, render->pixlen);
		dma_cache_sync(pdat->vram[pdat->index], render->pixlen, DMA_TO_DEVICE);
		rk3288_vop_set_win0_address(pdat, pdat->vram[pdat->index]);
		rk3288_vop_update_config(pdat);
	}
}
Example #4
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));

		dma_cache_sync(dev, sg_virt(s), s->length, dir);

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}
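For context, a sketch (an assumption, not part of the example) of the scatter-gather mapping that a ->map_sg hook like this one serves; buf0 and buf1 are placeholder kmalloc'd buffers.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_map_two_buffers(struct device *dev, void *buf0, void *buf1, size_t len)
{
	struct scatterlist sgl[2];
	int mapped;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], buf0, len);
	sg_set_buf(&sgl[1], buf1, len);

	mapped = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... feed sg_dma_address()/sg_dma_len() of each entry to the device ... */

	dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
	return 0;
}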
Example #5
static void fb_present(struct framebuffer_t * fb, struct render_t * render, struct region_list_t * rl)
{
	struct fb_f1c500s_pdata_t * pdat = (struct fb_f1c500s_pdata_t *)fb->priv;
	struct region_list_t * nrl = pdat->nrl;

	region_list_clear(nrl);
	region_list_merge(nrl, pdat->orl);
	region_list_merge(nrl, rl);
	region_list_clone(pdat->orl, rl);

	pdat->index = (pdat->index + 1) & 0x1;
	if(nrl->count > 0)
		present_render(pdat->vram[pdat->index], render, nrl);
	else
		memcpy(pdat->vram[pdat->index], render->pixels, render->pixlen);
	dma_cache_sync(pdat->vram[pdat->index], render->pixlen, DMA_TO_DEVICE);
	f1c500s_debe_set_address(pdat, pdat->vram[pdat->index]);
}
Example #6
void *dma_alloc_coherent(struct device *dev, size_t size,
               dma_addr_t *dma_handle, gfp_t gfp)
{
    void *ret, *ret_nocache;
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);

    if (mem) {
        int page = bitmap_find_free_region(mem->bitmap, mem->size,
                             order);
        if (page >= 0) {
            *dma_handle = mem->device_base + (page << PAGE_SHIFT);
            ret = mem->virt_base + (page << PAGE_SHIFT);
            memset(ret, 0, size);
            return ret;
        }
        if (mem->flags & DMA_MEMORY_EXCLUSIVE)
            return NULL;
    }

    ret = (void *)__get_free_pages(gfp, order);
    if (!ret)
        return NULL;

    memset(ret, 0, size);
    /*
     * Pages from the page allocator may have data present in
     * cache. So flush the cache before using uncached memory.
     */
    dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

    ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
    if (!ret_nocache) {
        free_pages((unsigned long)ret, order);
        return NULL;
    }

    *dma_handle = virt_to_phys(ret);
    return ret_nocache;
}
Example #7
static inline
void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
{
	unsigned long len = cmd->SCp.this_residual;
	void *addr = cmd->SCp.ptr;
	dma_addr_t physaddr;
	unsigned long count;
	struct hpc_chunk *hcp;

	physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
	cmd->SCp.dma_handle = physaddr;
	hcp = hd->cpu;

	while (len) {
		/*
		 * even cntinfo could be up to 16383, without
		 * magic only 8192 works correctly
		 */
		count = len > 8192 ? 8192 : len;
		hcp->desc.pbuf = physaddr;
		hcp->desc.cntinfo = count;
		hcp++;
		len -= count;
		physaddr += count;
	}

	/*
	 * To make sure, if we trip an HPC bug, that we transfer every single
	 * byte, we tag on an extra zero length dma descriptor at the end of
	 * the chain.
	 */
	hcp->desc.pbuf = 0;
	hcp->desc.cntinfo = HPCDMA_EOX;
	dma_cache_sync(hd->dev, hd->cpu,
		       (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
		       DMA_TO_DEVICE);
}
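A generic sketch of the same descriptor pattern (reconstructed as an illustration, not taken from the SGI driver): the CPU fills a chain of descriptors in non-coherent DMA memory, then dma_cache_sync() pushes them out before the hardware fetches them; struct my_desc is a placeholder layout.

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct my_desc {
	u32 pbuf;	/* bus address of the data chunk */
	u32 cntinfo;	/* byte count / control bits */
};

static void my_post_descs(struct device *dev, struct my_desc *ring,
			  dma_addr_t buf, u32 chunk, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].pbuf = (u32)(buf + i * chunk);
		ring[i].cntinfo = chunk;
	}

	/* write the CPU's descriptor updates back to memory for the device */
	dma_cache_sync(dev, ring, n * sizeof(*ring), DMA_TO_DEVICE);
}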
Example #8
void i2s_dma_tx_handler(u32 dma_ch)
{
	u32 i2s_status;
	
	i2s_status=i2s_inw(I2S_INT_STATUS);
	pi2s_config->tx_isr_cnt++;
	
	if(pi2s_config->bTxDMAEnable==0)
	{
	/*
		if(dma_ch==GDMA_I2S_TX0)
		{
			GdmaI2sTx((u32)pi2s_config->pPage0TxBuf8ptr, I2S_TX_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		}
		else
		{
			GdmaI2sTx((u32)pi2s_config->pPage1TxBuf8ptr, I2S_TX_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		}
	*/	
		MSG("TxDMA not enable\n");
		return;
	}
	
#ifdef 	I2S_STATISTIC
	if(pi2s_config->tx_isr_cnt%40==0)
		MSG("tisr i=%u,c=%u,o=%u,u=%d,s=%X [r=%d,w=%d]\n",pi2s_config->tx_isr_cnt,dma_ch,pi2s_status->txbuffer_ovrun,pi2s_status->txbuffer_unrun,i2s_status,pi2s_config->tx_r_idx,pi2s_config->tx_w_idx);
#endif

	if(pi2s_config->tx_r_idx==pi2s_config->tx_w_idx)
	{
		/* Buffer Empty */
		MSG("TXBE r=%d w=%d[i=%u,c=%u]\n",pi2s_config->tx_r_idx,pi2s_config->tx_w_idx,pi2s_config->tx_isr_cnt,dma_ch);
#ifdef I2S_STATISTIC		
		pi2s_status->txbuffer_unrun++;
#endif	
		if(dma_ch==GDMA_I2S_TX0)
		{
			memset(pi2s_config->pPage0TxBuf8ptr, 0, I2S_PAGE_SIZE);
			GdmaI2sTx((u32)pi2s_config->pPage0TxBuf8ptr, I2S_TX_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		}
		else
		{
			memset(pi2s_config->pPage1TxBuf8ptr, 0, I2S_PAGE_SIZE);
			GdmaI2sTx((u32)pi2s_config->pPage1TxBuf8ptr, I2S_TX_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		}
		
		goto EXIT;	
	}
	
	if(pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx]==NULL)
	{
		MSG("mmap buf NULL\n");
		if(dma_ch==GDMA_I2S_TX0)
			GdmaI2sTx((u32)pi2s_config->pPage0TxBuf8ptr, I2S_TX_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		else
			GdmaI2sTx((u32)pi2s_config->pPage1TxBuf8ptr, I2S_TX_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);

		goto EXIT;	
	}
#ifdef I2S_STATISTIC	
	pi2s_status->txbuffer_len--;
#endif
	if(dma_ch==GDMA_I2S_TX0)
	{	
#if defined(CONFIG_I2S_MMAP)
		dma_cache_sync(NULL, pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx], I2S_PAGE_SIZE, DMA_TO_DEVICE);
		GdmaI2sTx((u32)(pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx]), I2S_TX_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
#else
		memcpy(pi2s_config->pPage0TxBuf8ptr,  pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx], I2S_PAGE_SIZE);			
		GdmaI2sTx((u32)(pi2s_config->pPage0TxBuf8ptr), I2S_TX_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
#endif
		pi2s_config->dmach = GDMA_I2S_TX0;
		pi2s_config->tx_r_idx = (pi2s_config->tx_r_idx+1)%MAX_I2S_PAGE;
	}
	else
	{
#if defined(CONFIG_I2S_MMAP)
		dma_cache_sync(NULL, pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx], I2S_PAGE_SIZE, DMA_TO_DEVICE);	
		GdmaI2sTx((u32)(pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx]), I2S_TX_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
#else
		memcpy(pi2s_config->pPage1TxBuf8ptr,  pi2s_config->pMMAPTxBufPtr[pi2s_config->tx_r_idx], I2S_PAGE_SIZE);
		GdmaI2sTx((u32)(pi2s_config->pPage1TxBuf8ptr), I2S_TX_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
#endif
		pi2s_config->dmach = GDMA_I2S_TX1;
		pi2s_config->tx_r_idx = (pi2s_config->tx_r_idx+1)%MAX_I2S_PAGE;
		
	}
EXIT:
	wake_up_interruptible(&(pi2s_config->i2s_tx_qh));

	return;
}
PNDIS_PACKET GetPacketFromRxRing(
	IN		PRTMP_ADAPTER	pAd,
	OUT		PRT28XX_RXD_STRUC	pSaveRxD,
	OUT		BOOLEAN			*pbReschedule,
	IN OUT	UINT32			*pRxPending)
{
	PRXD_STRUC				pRxD;
#ifdef RT_BIG_ENDIAN
	PRXD_STRUC				pDestRxD;
	RXD_STRUC				RxD;
#endif
	PNDIS_PACKET			pRxPacket = NULL;
	PNDIS_PACKET			pNewPacket;
	PVOID					AllocVa;
	NDIS_PHYSICAL_ADDRESS	AllocPa;
	BOOLEAN					bReschedule = FALSE;
	RTMP_DMACB				*pRxCell;

	RTMP_SEM_LOCK(&pAd->RxRingLock);

	if (*pRxPending == 0)
	{
		/* Get how many packets have been received */
		RTMP_IO_READ32(pAd, RX_DRX_IDX , &pAd->RxRing.RxDmaIdx);

		if (pAd->RxRing.RxSwReadIdx == pAd->RxRing.RxDmaIdx)
		{
			/* no more rx packets*/
			bReschedule = FALSE;
			goto done;
		}

		/* get rx pending count*/
		if (pAd->RxRing.RxDmaIdx > pAd->RxRing.RxSwReadIdx)
			*pRxPending = pAd->RxRing.RxDmaIdx - pAd->RxRing.RxSwReadIdx;
		else
			*pRxPending	= pAd->RxRing.RxDmaIdx + RX_RING_SIZE - pAd->RxRing.RxSwReadIdx;

#ifdef DESC_32B_SUPPORT
		pRxCell = &pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx];
		dma_cache_sync(NULL, pRxCell->AllocVa, RXD_SIZE, DMA_FROM_DEVICE);
#endif /* DESC_32B_SUPPORT */
	}

	pRxCell = &pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx];

	/* flush dcache if no consistent memory is supported */
	RTMP_DCACHE_FLUSH(pRxCell->AllocPa, RXD_SIZE);

#ifdef RT_BIG_ENDIAN
	pDestRxD = (PRXD_STRUC) pRxCell->AllocVa;
	RxD = *pDestRxD;
	pRxD = &RxD;
	RTMPDescriptorEndianChange((PUCHAR)pRxD, TYPE_RXD);
#else
	/* Point to Rx indexed rx ring descriptor*/
	pRxD = (PRXD_STRUC) pRxCell->AllocVa;
#endif

	if (pRxD->DDONE == 0)
	{
		*pRxPending = 0;
		/* DMA index has advanced but the DDONE bit is not set yet */
		bReschedule = TRUE;
		goto done;
	}

#ifdef DESC_32B_SUPPORT
	prefetch(&pAd->RxRing.Cell[(pAd->RxRing.RxSwReadIdx + 1) % RX_RING_SIZE].AllocVa);
#endif /* DESC_32B_SUPPORT */

	/* return rx descriptor*/
	NdisMoveMemory(pSaveRxD, pRxD, RXD_SIZE);

	pNewPacket = RTMP_AllocateRxPacketBuffer(pAd, ((POS_COOKIE)(pAd->OS_Cookie))->pci_dev, RX_BUFFER_AGGRESIZE, FALSE, &AllocVa, &AllocPa);

	if (pNewPacket)
	{
		/* unmap the rx buffer*/
		PCI_UNMAP_SINGLE(pAd, pRxCell->DmaBuf.AllocPa,
					 pRxCell->DmaBuf.AllocSize, RTMP_PCI_DMA_FROMDEVICE);
		/* flush dcache if no consistent memory is supported */
		RTMP_DCACHE_FLUSH(pRxCell->DmaBuf.AllocPa, pRxCell->DmaBuf.AllocSize);

		pRxPacket = pRxCell->pNdisPacket;

		pRxCell->DmaBuf.AllocSize	= RX_BUFFER_AGGRESIZE;
		pRxCell->pNdisPacket		= (PNDIS_PACKET) pNewPacket;
		pRxCell->DmaBuf.AllocVa	= AllocVa;
		pRxCell->DmaBuf.AllocPa	= AllocPa;

		/* flush dcache if no consistent memory is supported */
		RTMP_DCACHE_FLUSH(pRxCell->DmaBuf.AllocPa, pRxCell->DmaBuf.AllocSize);

		/* update SDP0 to new buffer of rx packet */
		pRxD->SDP0 = AllocPa;

#ifdef RX_DMA_SCATTER
		pRxD->SDL0 = RX_BUFFER_AGGRESIZE;
#endif /* RX_DMA_SCATTER */
	}
	else 
	{
		/*DBGPRINT(RT_DEBUG_TRACE,("No Rx Buffer\n"));*/
		pRxPacket = NULL;
		bReschedule = TRUE;
	}

	/* had handled one rx packet*/
	*pRxPending = *pRxPending - 1;	

#ifndef CACHE_LINE_32B

	pRxD->DDONE = 0;

	/* update rx descriptor and kick rx */
#ifdef RT_BIG_ENDIAN
	RTMPDescriptorEndianChange((PUCHAR)pRxD, TYPE_RXD);
	WriteBackToDescriptor((PUCHAR)pDestRxD, (PUCHAR)pRxD, FALSE, TYPE_RXD);
#endif

#ifdef DESC_32B_SUPPORT
	dma_cache_sync(NULL, pRxCell->AllocVa, RXD_SIZE, DMA_TO_DEVICE);
#endif /* DESC_32B_SUPPORT */

	INC_RING_INDEX(pAd->RxRing.RxSwReadIdx, RX_RING_SIZE);

	pAd->RxRing.RxCpuIdx = (pAd->RxRing.RxSwReadIdx == 0) ? (RX_RING_SIZE-1) : (pAd->RxRing.RxSwReadIdx-1);
	RTMP_IO_WRITE32(pAd, RX_CRX_IDX, pAd->RxRing.RxCpuIdx);
#else /* CACHE_LINE_32B */

	/*
		Our RXD_SIZE is 16B, but if the cache line size is 32B we run into
		the following problem:

		1. We flush RXD 0; its start address is 32B-aligned.
			Nothing bad happens.
		2. We flush RXD 1; its start address is only 16B-aligned.
			Because the cache line size is 32B, the cache must write back a
			full 32B line and cannot flush 16B alone, so RXD 0 and RXD 1 are
			flushed together.
			Under heavy traffic RXD 0 may already have been updated by the
			MAC, i.e. its DDONE bit is 1, so when the cache flushes RXD 0
			that DDONE bit gets overwritten back to 0.
		3. When we later handle RXD 0 we find DDONE == 0 and wait forever
			for the MAC to set it to 1.
	*/
	if (pAd->RxRing.RxSwReadIdx & 0x01)
	{
		RTMP_DMACB *pRxCellLast;
#ifdef RT_BIG_ENDIAN
		PRXD_STRUC pDestRxDLast;
#endif
		/* 16B-align */

		/* update last BD 32B-align, DMA Done bit = 0 */
		pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx].LastBDInfo.DDONE = 0;
#ifdef RT_BIG_ENDIAN
		pRxCellLast = &pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx - 1];
		pDestRxDLast = (PRXD_STRUC) pRxCellLast->AllocVa;
		RTMPDescriptorEndianChange((PUCHAR)&pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx].LastBDInfo, TYPE_RXD);
		WriteBackToDescriptor((PUCHAR)pDestRxDLast, (PUCHAR)&pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx].LastBDInfo, FALSE, TYPE_RXD);
#endif

		/* update current BD 16B-align, DMA Done bit = 0 */
		pRxD->DDONE = 0;
#ifdef RT_BIG_ENDIAN
		RTMPDescriptorEndianChange((PUCHAR)pRxD, TYPE_RXD);
		WriteBackToDescriptor((PUCHAR)pDestRxD, (PUCHAR)pRxD, FALSE, TYPE_RXD);
#endif

		/* flush cache from last BD */
		RTMP_DCACHE_FLUSH(pRxCellLast->AllocPa, 32); /* use RXD_SIZE should be OK */

		/* update SW read and CPU index */
		INC_RING_INDEX(pAd->RxRing.RxSwReadIdx, RX_RING_SIZE);
		pAd->RxRing.RxCpuIdx = (pAd->RxRing.RxSwReadIdx == 0) ? (RX_RING_SIZE-1) : (pAd->RxRing.RxSwReadIdx-1);
		RTMP_IO_WRITE32(pAd, RX_CRX_IDX, pAd->RxRing.RxCpuIdx);
	}
	else
	{
		/* 32B-align */
		/* do not set DDONE bit and backup it */
		if (pAd->RxRing.RxSwReadIdx >= (RX_RING_SIZE-1))
		{
			DBGPRINT(RT_DEBUG_TRACE,
					("Please change RX_RING_SIZE to mutiple of 2!\n"));

			/* flush cache from current BD */
			RTMP_DCACHE_FLUSH(pRxCell->AllocPa, RXD_SIZE);

			/* update SW read and CPU index */
			INC_RING_INDEX(pAd->RxRing.RxSwReadIdx, RX_RING_SIZE);
			pAd->RxRing.RxCpuIdx = (pAd->RxRing.RxSwReadIdx == 0) ? (RX_RING_SIZE-1) : (pAd->RxRing.RxSwReadIdx-1);
			RTMP_IO_WRITE32(pAd, RX_CRX_IDX, pAd->RxRing.RxCpuIdx);
		}
		else
		{
			/* backup current BD */
			pRxCell = &pAd->RxRing.Cell[pAd->RxRing.RxSwReadIdx + 1];
			pRxCell->LastBDInfo = *pRxD;

			/* update CPU index */
			INC_RING_INDEX(pAd->RxRing.RxSwReadIdx, RX_RING_SIZE);
		}
	}
#endif /* CACHE_LINE_32B */

done:
	RTMP_SEM_UNLOCK(&pAd->RxRingLock);
	*pbReschedule = bReschedule;
	return pRxPacket;
}
static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
                         struct lli *lli, dma_addr_t dma_lli,
                         void __iomem *dmadr_addr, int dir)
{
    int i, idx = 0;
    int fis_len = 0;
    dma_addr_t next_llp;
    int bl;

    dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
            " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
            (u32)dmadr_addr);

    bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);

    for (i = 0; i < num_elems; i++, sg++) {
        u32 addr, offset;
        u32 sg_len, len;

        addr = (u32) sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
                "=%d\n", __func__, i, addr, sg_len);

        while (sg_len) {
            if (idx >= SATA_DWC_DMAC_LLI_NUM) {

                dev_err(host_pvt.dwc_dev, "LLI table overrun "
                        "(idx=%d)\n", idx);
                break;
            }
            len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
                  SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;

            offset = addr & 0xffff;
            if ((offset + sg_len) > 0x10000)
                len = 0x10000 - offset;

            if (fis_len + len > 8192) {
                dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
                        "%d(0x%x) len=%d(0x%x)\n", fis_len,
                        fis_len, len, len);
                len = 8192 - fis_len;
                fis_len = 0;
            } else {
                fis_len += len;
            }
            if (fis_len == 8192)
                fis_len = 0;

            if (dir == DMA_FROM_DEVICE) {
                lli[idx].dar = cpu_to_le32(addr);
                lli[idx].sar = cpu_to_le32((u32)dmadr_addr);

                lli[idx].ctl.low = cpu_to_le32(
                                       DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
                                       DMA_CTL_SMS(0) |
                                       DMA_CTL_DMS(1) |
                                       DMA_CTL_SRC_MSIZE(bl) |
                                       DMA_CTL_DST_MSIZE(bl) |
                                       DMA_CTL_SINC_NOCHANGE |
                                       DMA_CTL_SRC_TRWID(2) |
                                       DMA_CTL_DST_TRWID(2) |
                                       DMA_CTL_INT_EN |
                                       DMA_CTL_LLP_SRCEN |
                                       DMA_CTL_LLP_DSTEN);
            } else {
                lli[idx].sar = cpu_to_le32(addr);
                lli[idx].dar = cpu_to_le32((u32)dmadr_addr);

                lli[idx].ctl.low = cpu_to_le32(
                                       DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
                                       DMA_CTL_SMS(1) |
                                       DMA_CTL_DMS(0) |
                                       DMA_CTL_SRC_MSIZE(bl) |
                                       DMA_CTL_DST_MSIZE(bl) |
                                       DMA_CTL_DINC_NOCHANGE |
                                       DMA_CTL_SRC_TRWID(2) |
                                       DMA_CTL_DST_TRWID(2) |
                                       DMA_CTL_INT_EN |
                                       DMA_CTL_LLP_SRCEN |
                                       DMA_CTL_LLP_DSTEN);
            }

            dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
                    "0x%08x val: 0x%08x\n", __func__,
                    len, DMA_CTL_BLK_TS(len / 4));


            lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
                                            (len / 4));

            next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
                                   lli)));


            next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);

            lli[idx].llp = cpu_to_le32(next_llp);
            idx++;
            sg_len -= len;
            addr += len;
        }
    }

    if (idx) {
        lli[idx-1].llp = 0x00000000;
        lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;


        dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
                       DMA_BIDIRECTIONAL);
    }

    return idx;
}
ssize_t ar7240_i2s_write(struct file * filp, const char __user * buf,
			 size_t count, loff_t * f_pos, int resume)
{
#define prev_tail(t) ({ (t == 0) ? (NUM_DESC - 1) : (t - 1); })
#define next_tail(t) ({ (t == (NUM_DESC - 1)) ? 0 : (t + 1); })

//	uint8_t *data;
	ssize_t retval;
	
	int byte_cnt, offset, need_start = 0;
	int mode = 0;
	struct ar7240_i2s_softc *sc = &sc_buf_var;
	i2s_dma_buf_t *dmabuf = &sc->sc_pbuf;
	i2s_buf_t *scbuf;
	ar7240_mbox_dma_desc *desc;
	int tail = dmabuf->tail;
	unsigned long desc_p;
    int data_len = 0;


	I2S_LOCK(sc);

	byte_cnt = count;
	//printk("count:%d\n",count);
	//printk("byte_cnt:%d\n",byte_cnt);

	if (sc->popened < 2) {
        ar7240_reg_rmw_set(MBOX_INT_ENABLE, MBOX0_RX_DMA_COMPLETE | RX_UNDERFLOW);
		need_start = 1;
	}

	sc->popened = 2;

	scbuf = dmabuf->db_buf;
	desc = dmabuf->db_desc;
	desc_p = (unsigned long) dmabuf->db_desc_p;
	offset = 0;
	//data = scbuf[0].bf_vaddr;

	desc_p += tail * sizeof(ar7240_mbox_dma_desc);

	while (byte_cnt && !desc[tail].OWN) {
        if (byte_cnt >= I2S_BUF_SIZE) {
			desc[tail].length = I2S_BUF_SIZE;
			byte_cnt -= I2S_BUF_SIZE;
            data_len = I2S_BUF_SIZE;
		} else {
			desc[tail].length = byte_cnt;
            data_len = byte_cnt;
			byte_cnt = 0;
		}

        if(!filp)
        {
            memcpy(scbuf[tail].bf_vaddr, buf + offset, data_len);
        }
        else
        {
            retval = copy_from_user(scbuf[tail].bf_vaddr, buf + offset, data_len);
            if (retval) {
                /* do not leak the lock on a partial copy from user space */
                I2S_UNLOCK(sc);
                return -EFAULT;
            }
        }
		ar7240_cache_inv(scbuf[tail].bf_vaddr, desc[tail].length);

        dma_cache_sync(NULL, scbuf[tail].bf_vaddr, desc[tail].length, DMA_TO_DEVICE);
		
		desc[tail].BufPtr = (unsigned int) scbuf[tail].bf_paddr;
		desc[tail].OWN = 1;
		tail = next_tail(tail);
		offset += data_len;
	}

	dmabuf->tail = tail;

	if (need_start) {
		ar7240_i2sound_dma_desc((unsigned long) desc_p, mode);
		ar7240_i2sound_dma_start(mode);
	}
	else if (!sc->ppause) {
		ar7240_i2sound_dma_resume(mode);
	}

//    if (resume)
//        ar7240_i2sound_dma_resume(mode);

	I2S_UNLOCK(sc);
	//all_data+=(count - byte_cnt);

	//printk("all_data:%ld\n",all_data);
	//glzt_msleep(10);

	return count - byte_cnt;
}
Example #12
static long vpu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct miscdevice *dev = filp->private_data;
	struct jz_vpu *vpu = container_of(dev, struct jz_vpu, mdev);
	struct flush_cache_info info;
	int ret = 0;
	unsigned int status = 0;

	switch (cmd) {
	case WAIT_COMPLETE:
		ret = wait_for_completion_interruptible_timeout(
			&vpu->done, msecs_to_jiffies(200));
		if (ret > 0) {
			status = vpu->status;
		} else {
			dev_warn(vpu->dev, "[%d:%d] wait_for_completion timeout\n",
				 current->tgid, current->pid);
			if (vpu_reset(vpu) < 0)
				status = 0;
			vpu->done.done = 0;
		}
		if (copy_to_user((void *)arg, &status, sizeof(status)))
			ret = -EFAULT;
		break;
		
	case LOCK:
		if (vpu->owner_pid == current->pid) {
			dev_err(vpu->dev, "[%d:%d] dead lock\n",
				current->tgid, current->pid);
			ret = -EINVAL;
			break;
		}

		if (mutex_lock_interruptible(&vpu->mutex) != 0) {
			dev_err(vpu->dev, "[%d:%d] lock error!\n",
				current->tgid, current->pid);
			ret = -EIO;
			break;
		}
		vpu->owner_pid = current->pid;
		dev_dbg(vpu->dev, "[%d:%d] lock\n", current->tgid, current->pid);

		break;

	case UNLOCK:
		mutex_unlock(&vpu->mutex);
		vpu->owner_pid = 0;
		dev_dbg(vpu->dev, "[%d:%d] unlock\n", current->tgid, current->pid);
		break;

	case FLUSH_CACHE:
		if (copy_from_user(&info, (void *)arg, sizeof(info))) {
			ret = -EFAULT;
			break;
		}

		dma_cache_sync(NULL, (void *)info.addr, info.len, info.dir);
		dev_dbg(vpu->dev, "[%d:%d] flush cache\n", current->tgid, current->pid);
		break;
	default:
		break;
	}

	return ret;
}
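A hypothetical user-space counterpart to the FLUSH_CACHE branch above; the field layout of struct flush_cache_info is inferred from the handler, and the FLUSH_CACHE command value would have to come from the driver's own header.

#include <sys/ioctl.h>

struct flush_cache_info {
	unsigned long addr;	/* inferred: user virtual address of the buffer */
	unsigned long len;	/* inferred: length in bytes */
	int dir;		/* inferred: enum dma_data_direction value */
};

static int vpu_flush_cache(int fd, void *buf, unsigned long len, int dir)
{
	struct flush_cache_info info = {
		.addr = (unsigned long)buf,
		.len  = len,
		.dir  = dir,
	};

	/* FLUSH_CACHE is assumed to be defined by the driver's ioctl header */
	return ioctl(fd, FLUSH_CACHE, &info);
}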
Example #13
static unsigned long neo_sdram_dma_read_bus_direct
(zion_params_t *params, unsigned long offset_addr, void *buf, unsigned long size, int ch)
{
  unsigned long left_size;
  unsigned long entry_size;
  unsigned long upper = 0, lower = 0;
  int ret;

  /* check DWORD Alignment */
  if( (offset_addr%4)||(size%4) )
    {
      PERROR("Invalid Size or Address.\n");
      return 0;
    }

  /* check Setting of DMA Region */
  neo_get_region(params, ch, &lower, &upper);

  if(lower >= upper)
    {
      PERROR("No Region Specified.\n");
      return 0;
    }

  /* check size of area and size of IO */
  if(lower+offset_addr+size > upper)
    {
      size = upper - (lower + offset_addr);
    }

  if(size<=0)
    {
      return 0;
    }

  /* size to be read */
  left_size = size;

  /* Initialize SG table etc. */
  init_dma_ch(params,ch);

 DMA_READ:

  entry_size = DMA_MAX_ENTRY_SIZE;

  ret = make_sg_table_bus_direct(params, ch, &left_size, buf);

  if(ret)
    {
      size=0;
      left_size = 0;
      goto READ_RELEASE;
    }

  /* Set Registers and Set Timeout */
  neo_dma_prepare_bus_direct(params, ch, ZION_DMA_READ, (lower+offset_addr));

  /* Inv Cache */
//  dma_cache_inv(bus_to_virt((unsigned long)buf), size);
  dma_cache_sync(&params->dev->dev, bus_to_virt((unsigned long)buf), size, DMA_FROM_DEVICE);

  disable_irq(params->dev->irq);

  /* DMA Run */
  pci_write_config_word(params->dev, NEO_PCI_DMA_COMMAND(ch), 
			NEO_IO_DERECTION_READ|NEO_DMA_RUN|NEO_DMA_OPEN);

  ZION_PCI_PARAM(params)->dma_params[ch].timer.expires = jiffies + NEO_DMA_TIMEOUT;  
  add_timer(&(ZION_PCI_PARAM(params)->dma_params[ch].timer));

  enable_irq(params->dev->irq);
  
  /* Sleep */
  wait_event(ZION_PCI_PARAM(params)->dma_params[ch].neo_dma_wait_queue,
	     ZION_PCI_PARAM(params)->dma_params[ch].condition != ZION_PCI_INT_DISPATCH_PENDING);
  
  /* DMA End */
  if(ZION_PCI_PARAM(params)->dma_params[ch].condition == ZION_PCI_INT_DISPATCH_TIMEOUT)
    {
      PERROR("ZION DMA READ Timeout.\n");
      size = 0;
      left_size = 0;
      goto READ_RELEASE;
    }

 READ_RELEASE:

  release_dma_entry_bus_direct(params, ch);

  if(left_size)
    {
      //((u8 *)buf) += (size-left_size);
      buf += (size-left_size);
      goto  DMA_READ;
    }

  return size;
}
Example #14
static long vpu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct miscdevice *dev = filp->private_data;
	struct jz_vpu *vpu = container_of(dev, struct jz_vpu, mdev);
	struct flush_cache_info info;
	int ret = 0;
	unsigned int status = 0;

	volatile unsigned long flags;
	unsigned int addr, size;
	unsigned int * arg_r;
	int i,num;

	switch (cmd) {
	case CMD_WAIT_COMPLETE:
		ret = wait_for_completion_interruptible_timeout(
			&vpu->done, msecs_to_jiffies(200));
		if (ret > 0) {
			status = vpu->status;
		} else {
			dev_warn(vpu->dev, "[%d:%d] wait_for_completion timeout\n",
				 current->tgid, current->pid);
			if (vpu_reset(vpu) < 0)
				status = 0;
			vpu->done.done = 0;
		}
		if (copy_to_user((void *)arg, &status, sizeof(status)))
			ret = -EFAULT;
		break;
	case LOCK:
		if (vpu->owner_pid == current->pid) {
			dev_err(vpu->dev, "[%d:%d] dead lock\n",
				current->tgid, current->pid);
			ret = -EINVAL;
			break;
		}

		if (mutex_lock_interruptible(&vpu->mutex) != 0) {
			dev_err(vpu->dev, "[%d:%d] lock error!\n",
				current->tgid, current->pid);
			ret = -EIO;
			break;
		}
		vpu->owner_pid = current->pid;
		dev_dbg(vpu->dev, "[%d:%d] lock\n", current->tgid, current->pid);
		break;
	case UNLOCK:
		mutex_unlock(&vpu->mutex);
		vpu->owner_pid = 0;
		dev_dbg(vpu->dev, "[%d:%d] unlock\n", current->tgid, current->pid);
		break;
	case FLUSH_CACHE:
		if (copy_from_user(&info, (void *)arg, sizeof(info))) {
			ret = -EFAULT;
			break;
		}
		dma_cache_sync(NULL, (void *)info.addr, info.len, info.dir);
		dev_dbg(vpu->dev, "[%d:%d] flush cache\n", current->tgid, current->pid);
		break;
	case CMD_VPU_PHY:
		arg_r = (unsigned int *)arg;
		*arg_r = (0x1fffffff) & jz_tcsm_start;
		break;
	case CMD_VPU_CACHE:
		arg_r = (unsigned int *)arg;
		addr = (unsigned int)arg_r[0];
		size = arg_r[1];
		dma_cache_wback_inv(addr, size);
		break;
	case CMD_VPU_DMA_NOTLB:
		local_irq_save(flags);
		arg_r = (unsigned int *)arg;
		REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG;
		for(i = 0;i < 4; i += 2){
			*(unsigned int *)(arg_r[i]) = arg_r[i+1];
			printk("arg[%d]=%x arg[%d]=%d",i,arg_r[i],i+1,arg_r[i+1]);
		}
		local_irq_restore(flags);
		break;
	case CMD_VPU_DMA_TLB:
		local_irq_save(flags);
		arg_r = (unsigned int *)arg;
		REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG;
		for(i = 0;i < 10;i += 2)
			*(unsigned int *)(arg_r[i]) = arg_r[i+1];
		local_irq_restore(flags);
		break;
	case CMD_VPU_CLEAN_WAIT_FLAG:
		local_irq_save(flags);
		while( !(( REG_VPU_LOCK &(VPU_WAIT_OK)) ||( REG_VPU_STATUS&(VPU_END))) )
			;
		REG_VPU_LOCK &= ~(VPU_NEED_WAIT_END_FLAG);
		if(REG_VPU_LOCK & VPU_WAIT_OK)
			REG_VPU_LOCK &= ~(VPU_WAIT_OK);
		local_irq_restore(flags);
		break;
	case  CMD_VPU_RESET:
		local_irq_save(flags);
		REG_CPM_VPU_SWRST |= CPM_VPU_STP;
		while(!(REG_CPM_VPU_SWRST & CPM_VPU_ACK))
			;
		REG_CPM_VPU_SWRST = ((REG_CPM_VPU_SWRST | CPM_VPU_SR) & ~CPM_VPU_STP);
		REG_CPM_VPU_SWRST = (REG_CPM_VPU_SWRST & ~CPM_VPU_SR & ~CPM_VPU_STP);
		REG_VPU_LOCK = 0;
		local_irq_restore(flags);
		break;
	case   CMD_VPU_SET_REG:
		local_irq_save(flags);
		num = *(unsigned int*)arg;
		arg += 4;
		arg_r = (unsigned int *)arg;
		REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG;
		for(i = 0;i < num; i += 2)
			*(unsigned int *)(arg_r[i]) = arg_r[i+1];
		local_irq_restore(flags);
		break;
	default:
		break;
	}

	return ret;
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}
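The sgiseeq driver pairs this with a helper for the opposite direction; a sketch of that companion (reconstructed here, not copied from the listing) pulls the device's updates into the CPU's view before descriptor fields are read.

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}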
Example #16
/*
 * Function: map_sg_to_lli
 * The Synopsis driver has a comment proposing that better performance
 * is possible by only enabling interrupts on the last item in the linked list.
 * However, it seems that could be a problem if an error happened on one of the
 * first items.  The transfer would halt, but no error interrupt would occur.
 * Currently this function sets interrupts enabled for each linked list item:
 * DMA_CTL_INT_EN.
 */
static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
			struct lli *lli, dma_addr_t dma_lli,
			void __iomem *dmadr_addr, int dir)
{
	int i, idx = 0;
	int fis_len = 0;
	dma_addr_t next_llp;
	int bl;

	dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
		" dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
		(u32)dmadr_addr);

	bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);

	for (i = 0; i < num_elems; i++, sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
			"=%d\n", __func__, i, addr, sg_len);

		while (sg_len) {
			if (idx >= SATA_DWC_DMAC_LLI_NUM) {
				/* The LLI table is not large enough. */
				dev_err(host_pvt.dwc_dev, "LLI table overrun "
				"(idx=%d)\n", idx);
				break;
			}
			len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
				SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;

			offset = addr & 0xffff;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/*
			 * Make sure a LLI block is not created that will span
			 * 8K max FIS boundary.  If the block spans such a FIS
			 * boundary, there is a chance that a DMA burst will
			 * cross that boundary -- this results in an error in
			 * the host controller.
			 */
			if (fis_len + len > 8192) {
				dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
					"%d(0x%x) len=%d(0x%x)\n", fis_len,
					 fis_len, len, len);
				len = 8192 - fis_len;
				fis_len = 0;
			} else {
				fis_len += len;
			}
			if (fis_len == 8192)
				fis_len = 0;

			/*
			 * Set DMA addresses and lower half of control register
			 * based on direction.
			 */
			if (dir == DMA_FROM_DEVICE) {
				lli[idx].dar = cpu_to_le32(addr);
				lli[idx].sar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
					DMA_CTL_SMS(0) |
					DMA_CTL_DMS(1) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_SINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			} else {	/* DMA_TO_DEVICE */
				lli[idx].sar = cpu_to_le32(addr);
				lli[idx].dar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
					DMA_CTL_SMS(1) |
					DMA_CTL_DMS(0) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_DINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			}

			dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
				"0x%08x val: 0x%08x\n", __func__,
				len, DMA_CTL_BLK_TS(len / 4));

			/* Program the LLI CTL high register */
			lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
						(len / 4));

			/* Program the next pointer.  The next pointer must be
			 * the physical address, not the virtual address.
			 */
			next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
							lli)));

			/* The last 2 bits encode the list master select. */
			next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);

			lli[idx].llp = cpu_to_le32(next_llp);
			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/*
	 * The last next ptr has to be zero and the last control low register
	 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
	 * and destination enable) set back to 0 (disabled.) This is what tells
	 * the core that this is the last item in the linked list.
	 */
	if (idx) {
		lli[idx-1].llp = 0x00000000;
		lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;

		/* Flush cache to memory */
		dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
			       DMA_BIDIRECTIONAL);
	}

	return idx;
}
ssize_t ar7240_i2s_read(struct file * filp, char __user * buf,size_t count, loff_t * f_pos)//gl-inet
{
#define prev_tail(t) ({ (t == 0) ? (NUM_DESC - 1) : (t - 1); })
#define next_tail(t) ({ (t == (NUM_DESC - 1)) ? 0 : (t + 1); })

	uint8_t *data;
	//ssize_t retval;
	unsigned long retval;
	struct ar7240_i2s_softc *sc = &sc_buf_var;
	i2s_dma_buf_t *dmabuf = &sc->sc_rbuf;
	i2s_buf_t *scbuf;
	ar7240_mbox_dma_desc *desc;
	unsigned int byte_cnt, mode = 1, offset = 0, tail = dmabuf->tail;
	unsigned long desc_p;
	int need_start = 0;

	byte_cnt = count;

	if (sc->ropened < 2) {
		ar7240_reg_rmw_set(MBOX_INT_ENABLE, MBOX0_TX_DMA_COMPLETE);
		need_start = 1;
	}

	sc->ropened = 2;

	scbuf = dmabuf->db_buf;
	desc = dmabuf->db_desc;
	desc_p = (unsigned long) dmabuf->db_desc_p;
	data = scbuf[0].bf_vaddr;

	desc_p += tail * sizeof(ar7240_mbox_dma_desc);

	while (byte_cnt && !desc[tail].OWN) {
		if (byte_cnt >= I2S_BUF_SIZE) {
			desc[tail].length = I2S_BUF_SIZE;
			byte_cnt -= I2S_BUF_SIZE;
		} else {
			desc[tail].length = byte_cnt;
			byte_cnt = 0;
		}
		//ar7240_dma_cache_sync(scbuf[tail].bf_vaddr, desc[tail].length);//gl-inet

		dma_cache_sync(NULL, scbuf[tail].bf_vaddr, desc[tail].length, DMA_FROM_DEVICE);//gl-inet		
		desc[tail].rsvd2 = 0;//gl-inet
		
		retval = copy_to_user(buf + offset, scbuf[tail].bf_vaddr, desc[tail].length);

		if (retval)
			return -EFAULT;
		desc[tail].BufPtr = (unsigned int) scbuf[tail].bf_paddr;
		desc[tail].OWN = 1;

		tail = next_tail(tail);
		offset += I2S_BUF_SIZE;
	}

	dmabuf->tail = tail;

	if (need_start) {
		ar7240_i2sound_dma_desc((unsigned long) desc_p, mode);
        if (filp) {
		    ar7240_i2sound_dma_start(mode);
        }
	} else if (!sc->rpause) {
		ar7240_i2sound_dma_resume(mode);
	}

	return offset;
}