int rtl_cipher_crypt(struct crypto_cipher *cipher, u8 bEncrypt,
	struct rtl_cipher_ctx *ctx, u8 *src, unsigned int nbytes, u8 *iv, u8 *dst)
{
	unsigned int bsize = crypto_cipher_blocksize(cipher);
	u8 *key = bEncrypt ? ctx->key :
		((ctx->mode & 0x20) ? ctx->aes_dekey : ctx->key);
	rtl_ipsecScatter_t scatter[1];
	u32 flag_encrypt = bEncrypt ? 4 : 0;
	int err;

#ifdef CONFIG_RTK_VOIP_DBG
	printk("%s: src=%p, len=%d, blk=%d, key=%p, iv=%p, dst=%p\n", __FUNCTION__,
		src, nbytes, bsize, key, iv, dst);

	rtl_crypto_hexdump((void *) src, nbytes);
	rtl_crypto_hexdump((void *) key, ctx->key_length);
	rtl_crypto_hexdump((void *) iv, bsize);
#endif

	dma_cache_wback((u32) src, nbytes);
	dma_cache_wback((u32) key, ctx->key_length);
	dma_cache_wback((u32) iv, bsize);

	scatter[0].len = (nbytes / bsize) * bsize;
	scatter[0].ptr = (void *) CKSEG1ADDR(src);

	/*
		int32 rtl_ipsecEngine(uint32 modeCrypto, uint32 modeAuth, 
			uint32 cntScatter, rtl_ipsecScatter_t *scatter, void *pCryptResult,
			uint32 lenCryptoKey, void* pCryptoKey, 
			uint32 lenAuthKey, void* pAuthKey, 
			void* pIv, void* pPad, void* pDigest,
			uint32 a2eo, uint32 enl)
	*/
	err = rtl_ipsecEngine(ctx->mode | flag_encrypt,
		-1, 1, scatter,
		(void *) CKSEG1ADDR(dst),
		ctx->key_length, (void *) CKSEG1ADDR(key),
		0, NULL,
		(void *) CKSEG1ADDR(iv), NULL, NULL,
		0, scatter[0].len);

	if (unlikely(err))
		printk("%s: rtl_ipsecEngine failed\n", __FUNCTION__);

	dma_cache_inv((u32) dst, nbytes);
#ifdef CONFIG_RTK_VOIP_DBG
	printk("result:\n");
	rtl_crypto_hexdump(dst, nbytes);
#endif

	// return the bytes left unprocessed (the partial tail smaller than one
	// block), even on error, as blkcipher_walk expects
	return nbytes - scatter[0].len;
}
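The example above shows the cache-maintenance pattern that recurs throughout this page: every buffer the engine reads (source, key, IV) is written back to memory first, the engine is handed uncached KSEG1 aliases, and the destination is invalidated before the CPU reads the result. A minimal sketch of the same discipline around a hypothetical engine call (hw_engine_run() and its prototype are placeholders, not a real API):

/* Sketch only: hw_engine_run() is a placeholder for a memory-to-memory
 * hardware engine; the cache calls mirror rtl_cipher_crypt() above. */
static void hw_crypt_buffer(u8 *src, u8 *dst, u8 *key, u8 *iv,
			    unsigned int len, unsigned int klen,
			    unsigned int bsize)
{
	/* CPU-written inputs must reach DRAM before the engine reads them */
	dma_cache_wback((u32) src, len);
	dma_cache_wback((u32) key, klen);
	dma_cache_wback((u32) iv, bsize);

	/* hand the engine uncached (KSEG1) aliases of the buffers */
	hw_engine_run((void *) CKSEG1ADDR(src), (void *) CKSEG1ADDR(dst),
		      (void *) CKSEG1ADDR(key), (void *) CKSEG1ADDR(iv), len);

	/* drop stale cache lines covering the engine-written output */
	dma_cache_inv((u32) dst, len);
}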
Example #2
// data moves from memory to NAND.
int _ra_nand_dma_push(unsigned long src, int len)
{
	int ret = 0;
	
#if !defined (__UBOOT__) // U-Boot sets KSEG0 as noncached
	dma_cache_wback(src, len);
#else
	flush_cache(src, len);
#endif

	// set GDMA 
	_set_gdma_ch(NFC_DATA, PHYSADDR((void*)src), len,  
		     BURST_SIZE_4B, HW_MODE, DMA_REQMEM, DMA_NAND_REQ, 
		     TRN_INC, TRN_FIX);

	// start the DMA and wait for completion
	if (_nand_dma_sync()) {
		printk("%s: gdma: fail, src:%lx, len:%x\n", __func__, src, len);
		ret = -1;
	}

	
	// disable dma
	_release_dma_buf();

	
	return ret;
}
void __cpuinit brcm_wr_vec(unsigned long dst, char *start, char *end)
{
	/* copy the vector code, then push the copy out of the D-cache so the
	 * following I-cache fetch from 'dst' sees the new instructions */
	memcpy((void *)dst, start, end - start);
	dma_cache_wback(dst, end - start);
	local_flush_icache_range(dst, dst + (end - start));
	instruction_hazard();
}
Example #4
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}

	addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;
	return (dma_addr_t)addr;
}
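A typical caller pairs this with dma_unmap_single() from the same streaming API; the sketch below is generic usage, not code from this port:

/* Usage sketch: map before the device reads the buffer, unmap before the
 * CPU touches it again. Error handling is omitted. */
static void send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... program the device with 'handle' and run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}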
Example #5
static void
powertecscsi_invalidate(char *addr, long len, fasdmadir_t direction)
{
	if (direction == DMA_OUT)
		dma_cache_wback((unsigned long)addr, (unsigned long)len);
	else
		dma_cache_inv((unsigned long)addr, (unsigned long)len);
}
Example #6
static int pcu_dma_tasklet_write(uint32_t virtual_addr_buffer, L_OFF_T external_physical_device_address, uint32_t pcu_dma_len)
{
    uint32_t  phys_mem;
    int ret = 0;

    unsigned long flags;

    if (KSEGX(virtual_addr_buffer) == KSEG0) {
        phys_mem = virt_to_phys((void *)virtual_addr_buffer);
        dma_cache_wback(virtual_addr_buffer, pcu_dma_len);
    } else {
        phys_mem = virt_to_phys((void *)pcu_dma_buf);
        memcpy(pcu_dma_buf, (void *)virtual_addr_buffer, pcu_dma_len);
        dma_cache_wback((unsigned long)pcu_dma_buf, pcu_dma_len);
    }

    spin_lock_irqsave(&gPcuDmaIsrData.lock, flags);
    gPcuDmaIsrData.flashAddr = __ll_low(external_physical_device_address);
    gPcuDmaIsrData.dramAddr = phys_mem;
    
    gPcuDmaIsrData.cmd = PCU_DMA_WRITE;
    gPcuDmaIsrData.opComplete = 0;
    gPcuDmaIsrData.status = 0;
    
    /* On write we wait for both DMA done|error and Flash Status */
    gPcuDmaIsrData.mask =  PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
    gPcuDmaIsrData.expect =  PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK;
    gPcuDmaIsrData.error = 0; /* no error indication */
    gPcuDmaIsrData.intr =  PCU_DMA_INTR2_STATUS_NAND_CHNL_EOB_STAT_MASK; /* write back 1 to clear */
    spin_unlock_irqrestore(&gPcuDmaIsrData.lock, flags);

    /*
     * Enable L2 Interrupt
     */
    PCU_DMA_CLRI();
    ISR_enable_irq();
    
    pcu_dma_issue_command(phys_mem, external_physical_device_address, PCU_DMA_WRITE, pcu_dma_len); /* 1: Is a Read, 0 Is a Write */

    // Do not wait for completion here; this appears to be handled in brcmnand35xxx_base.c.

    return ret;
}
Example #7
File: hwcs.c  Project: jhbsz/102
/*
 * Start checksum engine
 */
__sum16 ath_hwcs_start(void *buf, int len)
{
	// Initialize descriptor with buffer address, packet size
	//ath_hwcs_tx_desc->buf = (char *)dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
	ath_hwcs_tx_desc->buf = (char *)virt_to_phys(buf);
	ath_hwcs_tx_desc->status_only = 0;
	ath_hwcs_tx_desc->info.status = (ATH_HWCS_TX_SOF_MASK | ATH_HWCS_TX_EOF_MASK | ATH_HWCS_INTR_ENABLE);
	ath_hwcs_tx_desc->info.control.pktSize = len;

	dma_cache_wback((unsigned long)ath_hwcs_tx_desc, sizeof(ath_hwcs_desc_t));
	udelay(1); /* delay is required to get the status properly between cache flush and DMA enable */ 
	// Enable DMA packet transfer
	ath_reg_wr(ATH_HWCS_DMATX_CONTROL0, ATH_HWCS_DMATX_ENABLE);
	ath_start_csum = 1;

	return 0;
}
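The commented-out line above hints at the portable alternative shown in Example #4: dma_map_single() with DMA_TO_DEVICE performs the writeback internally. A hedged sketch, assuming a NULL device is acceptable on this platform as that comment suggests:

	/* Portable variant of the buffer setup (sketch only). The explicit
	 * dma_cache_wback() on the descriptor is still required, since the
	 * descriptor itself is not covered by this mapping. */
	ath_hwcs_tx_desc->buf = (char *)dma_map_single(NULL, buf, len, DMA_TO_DEVICE);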
Example #8
/*
 * streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so it needs to be
 * explicitly made consistent before each use.
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
Example #10
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}
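On non-coherent platforms this hook is invoked from the generic mapping path before the device is given the buffer. A conceptual sketch of that call site (simplified; the real dma-direct code also handles address translation, bounce buffering and the coherent case, and dev_is_dma_coherent() plus the identity physical-to-DMA mapping are simplifying assumptions here):

static dma_addr_t sketch_map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir)
{
	phys_addr_t paddr = virt_to_phys(ptr);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);

	return (dma_addr_t)paddr;	/* identity mapping assumed */
}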
Example #11
/* transmit packet */
static int rc32434_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct rc32434_local		*lp = (struct rc32434_local *)dev->priv;
	unsigned long 			flags;
	u32					length;
	DMAD_t				td;
	
	
	spin_lock_irqsave(&lp->lock, flags);
	
	td = &lp->td_ring[lp->tx_chain_tail];
	
	/* stop queue when full, drop pkts if queue already full */
	if(lp->tx_count >= (RC32434_NUM_TDS - 2)) {
		lp->tx_full = 1;
		
		if(lp->tx_count == (RC32434_NUM_TDS - 2)) {
			netif_stop_queue(dev);
		}
		else {
			lp->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);
			return 1;
		}	   
	}	 
	
	lp->tx_count ++;
	
	lp->tx_skb[lp->tx_chain_tail] = skb;
	
	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);
	
	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	
	if(__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /*  Update tail      */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /*   Move tail       */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR    */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m|DMAD_iof_m;                                 /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr)); /* Write to NDPTR */
			lp->tx_chain_head = lp->tx_chain_tail;                                                  /* Move head to tail */
			lp->tx_chain_status = empty;
		}
	}
	else {
		if( lp->tx_chain_status == empty ) {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
			lp->tx_chain_status = filled;
		}
		else {
			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;                                /* Update tail */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].control &=  ~(DMAD_cof_m);          /* Link to prev */
			lp->td_ring[(lp->tx_chain_tail-1)& RC32434_TDS_MASK].link =  CPHYSADDR(td);              /* Link to prev */
			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & RC32434_TDS_MASK;                          /* Move tail */
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));
	
	dev->trans_start = jiffies;				
	
	spin_unlock_irqrestore(&lp->lock, flags);
	
	return 0;
}
Example #12
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_wback(paddr, size);
}
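A port that only writes back on the device-direction sync would normally pair it with a CPU-direction hook that invalidates; the sketch below is an assumed counterpart, not taken from the same source:

/* Assumed counterpart (sketch): discard stale lines before the CPU reads
 * data the device has just written. */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		dma_cache_inv(paddr, size);
}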
Example #13
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_over_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			/* 16 bit align */
			skb_reserve(skb_new, 2);

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}
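The receive path above follows a fixed cache discipline; distilled into a sketch (same struct dma_desc layout assumed, helper name is hypothetical):

/* RX discipline from korina_rx(), reduced to the cache operations only. */
static void rx_cache_discipline(struct dma_desc *rd, u8 *pkt_buf, u32 pkt_len)
{
	dma_cache_inv((u32)rd, sizeof(*rd));            /* descriptor written by DMA  */
	dma_cache_inv((unsigned long)pkt_buf, pkt_len); /* packet data written by DMA */

	/* ... CPU consumes the packet, then refills the descriptor ... */

	dma_cache_wback((u32)rd, sizeof(*rd));          /* publish the refill to DMA  */
}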
Example #14
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
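The transmit path is the mirror image: the CPU-produced payload and the updated descriptor are written back before the DMA engine is pointed at them. A reduced sketch (hypothetical helper, same types as above):

/* TX ordering from korina_send_packet(), cache operations only. */
static void tx_cache_discipline(struct dma_desc *td, struct sk_buff *skb)
{
	dma_cache_wback((u32)skb->data, skb->len); /* payload must be in DRAM first */

	dma_cache_inv((u32)td, sizeof(*td));       /* re-read descriptor state      */
	td->ca = CPHYSADDR(skb->data);
	/* ... fill in td->control and the chain links ... */

	dma_cache_wback((u32)td, sizeof(*td));     /* publish the descriptor to DMA */
}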
Example #15
static void H264ppWorkAroundGNBvd42331( H264ppIndividualContext_t	 *SubContext,
					unsigned int			  N )
{
unsigned int	i;
unsigned int	mb_adaptive_frame_field_flag;
unsigned int	entropy_coding_mode_flag;
unsigned int	PerformWorkaround;
unsigned int	SavedITM;
unsigned int	BufferBase;
unsigned int	SourceAddress;
unsigned int	EndAddress;
unsigned int	SliceErrorStatusAddress;
unsigned int	IntermediateAddress;
unsigned int	IntermediateEndAddress;

    //
    // Do we need to apply the workaround?
    //

    mb_adaptive_frame_field_flag			= ((SubContext->Parameters.Cfg & 1) != 0);
    entropy_coding_mode_flag				= ((SubContext->Parameters.Cfg & 2) != 0);

    PerformWorkaround					= !mb_adaptive_frame_field_flag && 
							  SubContext->last_mb_adaptive_frame_field_flag &&
							  entropy_coding_mode_flag;

    SubContext->last_mb_adaptive_frame_field_flag	= mb_adaptive_frame_field_flag;

    if( !PerformWorkaround && !SubContext->ForceWorkAroundGNBvd42331 )
	return;

//OSDEV_Print( "H264ppWorkAroundGNBvd42331 - Deploying GNBvd42331 workaround block to PP %d - %08x.\n", N, SubContext->Parameters.Cfg );

    SubContext->ForceWorkAroundGNBvd42331	= 0;

    //
    // we transfer the workaround stream to the output buffer (offset by 64k to not interfere with the output).
    //

    memcpy( (void *)((unsigned int)SubContext->Parameters.BufferCachedAddress + 0x10000), GNBvd42331Data, sizeof(GNBvd42331Data) );

    GNBvd42331DataPhysicalAddress	= (unsigned char *)SubContext->Parameters.BufferPhysicalAddress + 0x10000;

#ifdef __TDT__
/* This patch was found elsewhere and is said to improve performance by
 * about 1%; it should be revised.
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
    dma_cache_wback((SubContext->Parameters.BufferCachedAddress + 0x10000),sizeof(GNBvd42331Data));
#else
    writeback_ioremap_region(0, (SubContext->Parameters.BufferCachedAddress + 0x10000),
    		0, sizeof(GNBvd42331Data));
#endif

#else
    flush_cache_all();
#endif

    //
    // Derive the pointers - we use the next buffer to be queued as output as our output 
    //

    BufferBase                  = (unsigned int)SubContext->Parameters.BufferPhysicalAddress;

    SliceErrorStatusAddress     = BufferBase;
    IntermediateAddress         = BufferBase + H264_PP_SESB_SIZE;
    IntermediateEndAddress      = IntermediateAddress + H264_PP_OUTPUT_SIZE - 1;
    SourceAddress               = (unsigned int)GNBvd42331DataPhysicalAddress;
    EndAddress                  = (unsigned int)GNBvd42331DataPhysicalAddress + sizeof(GNBvd42331Data) - 1;

    //
    // Launch the workaround block
    //

    SavedITM		= OSDEV_ReadLong(PP_ITM(N));			// Turn off interrupts
    OSDEV_WriteLong( PP_ITM(N), 0 );

    OSDEV_WriteLong( PP_BBG(N),                 (SourceAddress & 0xfffffff8) );
    OSDEV_WriteLong( PP_BBS(N),                 (EndAddress    | 0x7) );
    OSDEV_WriteLong( PP_READ_START(N),          SourceAddress );
    OSDEV_WriteLong( PP_READ_STOP(N),           EndAddress );

    OSDEV_WriteLong( PP_ISBG(N),                SliceErrorStatusAddress );
    OSDEV_WriteLong( PP_IPBG(N),                IntermediateAddress );
    OSDEV_WriteLong( PP_IBS(N),                 IntermediateEndAddress );

    OSDEV_WriteLong( PP_CFG(N),                 GNBvd42331_CFG );
    OSDEV_WriteLong( PP_PICWIDTH(N),            GNBvd42331_PICWIDTH );
    OSDEV_WriteLong( PP_CODELENGTH(N),          GNBvd42331_CODELENGTH );

    OSDEV_WriteLong( PP_ITS(N),                 0xffffffff );		// Clear interrupt status
    OSDEV_WriteLong( PP_START(N),               1 );

    //
    // Wait for it to complete
    //

    for( i=0; i<H264_PP_RESET_TIME_LIMIT; i++ )
    {
	OSDEV_SleepMilliSeconds( 1 );

	if( (OSDEV_ReadLong(PP_ITS(N)) & PP_ITM__DMA_CMP) != 0 )
	    break;

    }

    if( (i == H264_PP_RESET_TIME_LIMIT) || (OSDEV_ReadLong(PP_ITS(N)) != PP_ITM__DMA_CMP) )
	OSDEV_Print( "H264ppWorkAroundGNBvd42331 - Failed to execute GNBvd42331 workaround block to PP %d (ITS %08x).\n", N, OSDEV_ReadLong(PP_ITS(N)) );

    //
    // Restore the interrupts
    //

    OSDEV_WriteLong( PP_ITS(N),                     0xffffffff );           // Clear interrupt status
    OSDEV_WriteLong( PP_ITM(N), SavedITM );

}
Example #16
#ifdef CONFIG_IDT_USE_NAPI
static int rc32434_poll(struct net_device *rx_data_dev, int *budget)
#else
static void rc32434_rx_tasklet(unsigned long rx_data_dev)
#endif
{
	struct net_device *dev = (struct net_device *)rx_data_dev;	
	struct rc32434_local* lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8* pkt_buf;
	u32 devcs, count, pkt_len, pktuncrc_len;
	volatile u32 dmas;
#ifdef CONFIG_IDT_USE_NAPI
	u32 received = 0;
	int rx_work_limit = min(*budget,dev->quota);
#else
	unsigned long 	flags;
	spin_lock_irqsave(&lp->lock, flags);
#endif

	dma_cache_inv((u32)rd, sizeof(*rd));
	while ( (count = RC32434_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
#ifdef CONFIG_IDT_USE_NAPI
		if(--rx_work_limit <0)
                {
                        break;
                }
#endif
		/* init the var. used for the later operations within the while loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if (count < 64) {
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;			
		}
		else if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
			{
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = dev_alloc_skb(RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
#ifdef CONFIG_IDT_USE_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
			}
			
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new)
			rd->ca = CPHYSADDR(skb_new->data); 
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
#ifdef CONFIG_IDT_USE_NAPI
        dev->quota -= received;
        *budget -= received;
        if(rx_work_limit < 0)
                goto not_done;
#endif
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
#ifdef CONFIG_IDT_USE_NAPI
	netif_rx_complete(dev);
#endif
	/* Enable D H E bit in Rx DMA */
	__raw_writel(__raw_readl(&lp->rx_dma_regs->dmasm) & ~(DMASM_d_m | DMASM_h_m |DMASM_e_m), &lp->rx_dma_regs->dmasm); 
#ifdef CONFIG_IDT_USE_NAPI
	return 0;
 not_done:
	return 1;
#else
	spin_unlock_irqrestore(&lp->lock, flags);
	return;
#endif

	
}	
Example #17
static int rc32434_rx(struct net_device *dev, int limit)
{
        struct rc32434_local *lp = netdev_priv(dev);
	volatile DMAD_t  rd = &lp->rd_ring[lp->rx_next_done];
        struct sk_buff *skb, *skb_new;
        u8 *pkt_buf;
        u32 devcs, pkt_len, dmas, rx_free_desc;
	u32 pktuncrc_len;
        int count;

	dma_cache_inv((u32)rd, sizeof(*rd));
	for (count = 0; count < limit; count++) {
		/* init the var. used for the later operations within the while loop */
		skb_new = NULL;
		devcs = rd->devcs;
		pkt_len = RCVPKT_LENGTH(devcs);
		skb = lp->rx_skb[lp->rx_next_done];
      
		if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m) {
			/* check that this is a whole packet */
			/* WARNING: DMA_FD bit incorrectly set in Rc32434 (errata ref #077) */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
		}
		else if ( (devcs & ETHRX_rok_m)  ) {
			
				/* must be the (first and) last descriptor then */
				pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;
				
				pktuncrc_len = pkt_len - 4;
				/* invalidate the cache */
				dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);
				
				/* Malloc up new buffer. */					  
				skb_new = netdev_alloc_skb(dev, RC32434_RBSIZE + 2);					             	
				
				if (skb_new != NULL){
					/* Make room */
					skb_put(skb, pktuncrc_len);		    
					
					skb->protocol = eth_type_trans(skb, dev);
					
					/* pass the packet to upper layers */
					netif_receive_skb(skb);
					
					dev->last_rx = jiffies;
					lp->stats.rx_packets++;
					lp->stats.rx_bytes += pktuncrc_len;
					
					if (IS_RCV_MP(devcs))
						lp->stats.multicast++;
					
					/* 16 bit align */						  
					skb_reserve(skb_new, 2);	
					
					skb_new->dev = dev;
					lp->rx_skb[lp->rx_next_done] = skb_new;
				}
				else {
					ERR("no memory, dropping rx packet.\n");
					lp->stats.rx_errors++;		
					lp->stats.rx_dropped++;					
				}
		}			
		else {
			/* This should only happen if we enable accepting broken packets */
			lp->stats.rx_errors++;
			lp->stats.rx_dropped++;
			
			/* add statistics counters */
			if (IS_RCV_CRC_ERR(devcs)) {
				DBG(2, "RX CRC error\n");
				lp->stats.rx_crc_errors++;
			} 
			else if (IS_RCV_LOR_ERR(devcs)) {
				DBG(2, "RX LOR error\n");
				lp->stats.rx_length_errors++;
			}				
			else if (IS_RCV_LE_ERR(devcs)) {
				DBG(2, "RX LE error\n");
				lp->stats.rx_length_errors++;
			}
			else if (IS_RCV_OVR_ERR(devcs)) {
				lp->stats.rx_over_errors++;
			}
			else if (IS_RCV_CV_ERR(devcs)) {
				/* code violation */
				DBG(2, "RX CV error\n");
				lp->stats.rx_frame_errors++;
			}
			else if (IS_RCV_CES_ERR(devcs)) {
				DBG(2, "RX Preamble error\n");
			}
		}
		rd->devcs = 0;
		
		/* restore descriptor's curr_addr */
		if(skb_new) {
			rd->ca = CPHYSADDR(skb_new->data);
		}
		else
			rd->ca = CPHYSADDR(skb->data);
		
		rd->control = DMA_COUNT(RC32434_RBSIZE) |DMAD_cod_m |DMAD_iod_m;
		lp->rd_ring[(lp->rx_next_done-1)& RC32434_RDS_MASK].control &=  ~(DMAD_cod_m); 	
		
		lp->rx_next_done = (lp->rx_next_done + 1) & RC32434_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		__raw_writel( ~DMAS_d_m, &lp->rx_dma_regs->dmas);
	}	
	
	dmas = __raw_readl(&lp->rx_dma_regs->dmas);
	
	if(dmas & DMAS_h_m) {
		/* Mask off halt and error bits */
		__raw_writel( ~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
#ifdef RC32434_PROC_DEBUG
		lp->dma_halt_cnt++;
#endif
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		rc32434_chain_rx(lp,rd);
	}
	
	return count;
}
Example #18
/*
 * tx request callback
 */
static int enet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	volatile struct enet_desc *tx=NULL, *ptx=NULL;
	unsigned long tconfig_cache;
	unsigned long val = 0;
	volatile u32 *r_addr;
	int len = 0;
	int tx_busy = 0;
	unsigned char *txbuf;

	priv = netdev_priv(dev);
	spin_lock(&priv->tx_lock);

	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;
#ifndef ENABLE_TX_CHAINING
#ifdef CONFIG_TANGOX_ENET_TX_DELAY_1000US
#define MAX_TX_TIMEOUT	1000	/* usec */
#else
#define MAX_TX_TIMEOUT	100	/* usec */
#endif
	for (len = 0; len < MAX_TX_TIMEOUT; len++) {
		val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;
		if (val & TCR_EN)
			udelay(1);
		else
			break;
	}
	if (len >= MAX_TX_TIMEOUT) {
		priv->stats.tx_dropped++;
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}
#else
	if (val & TCR_EN){ 
		//BUG_ON(skb == NULL);
		tx_busy = 1;
		if (priv->pending_tx < 0)
			priv->pending_tx = priv->next_tx_desc;
	} 

	if (tx_busy && (priv->pending_tx >= 0) && (priv->pending_tx_cnt >= (TX_DESC_COUNT -1))) {
		DBG(KERN_WARNING PFX "no more tx desc can be scheduled in pending queue.\n");
		netif_stop_queue(dev);
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}
		
	if (skb == NULL) {
		unsigned int last_tx;		
		last_tx = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
		tx = &priv->tx_descs[last_tx];
		tx->config |= DESC_EOC;
		priv->tx_eoc = last_tx;
		mb();
		goto tx_pending;
	}
#endif
	len = skb->len;
	tx = &priv->tx_descs[priv->next_tx_desc];

	/* fill the tx desc with this skb address */
	tconfig_cache = 0;
	tconfig_cache |= DESC_BTS(2);
	tconfig_cache |= DESC_EOF;
	tconfig_cache |= len; 

	if (((unsigned long)(skb->data) & 0x7) != 0) { /* not align by 8 bytes */
		txbuf = priv->tx_bufs[priv->next_tx_desc];
		memcpy(txbuf, skb->data, len); 
		dma_cache_wback((unsigned long)txbuf, len);
		tx->s_addr = PHYSADDR((void *)txbuf);
	} else {
		dma_cache_wback((unsigned long)skb->data, len);
		tx->s_addr = PHYSADDR(skb->data);
	}

	if (tx_busy != 0) {
		tx->n_addr = PHYSADDR((void *)&(priv->tx_descs[(priv->next_tx_desc + 1) % TX_DESC_COUNT]));
	} else {
		tx->n_addr = 0;
		tconfig_cache |= DESC_EOC;
		priv->tx_eoc = priv->next_tx_desc;
	}
	tx->config = tconfig_cache;

	/* keep a pointer to it for later and give it to dma  */
	priv->tx_skbs[priv->next_tx_desc] = skb;

	r_addr = (volatile u32 *)KSEG1ADDR((u32)(&(priv->tx_report[priv->next_tx_desc])));
	__raw_writel(0, r_addr);
	priv->next_tx_desc++;
	priv->next_tx_desc %= TX_DESC_COUNT;

#ifdef ETH_DEBUG
	{
	int i;			
 	for(i=0; i<len; i++){
		if(i%16==0 && i>0)
			DBG("\n");
		DBG("%02x ", txbuf[i] & 0xff);
	}
	DBG("\n");

	DBG("DESC Mode:  TXC_CR=0x%x  desc_addr=0x%x s_addr=0x%x n_addr=0x%x r_addr=0x%x config=0x%x\n",
			enet_readl(ENET_TXC_CR(priv->enet_mac_base)), tx,
			tx->s_addr, tx->n_addr,
			tx->r_addr, tx->config); 
	}
#endif

tx_pending:
	if (tx_busy == 0) {
		if (priv->pending_tx >= 0) {
			ptx = &priv->tx_descs[priv->pending_tx];
			len = ptx->config & 0xffff;

			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)ptx));
			priv->reclaim_limit = priv->pending_tx;
			priv->pending_tx = -1;
		} else {
			priv->reclaim_limit = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)tx));
		}

		enet_writel(ENET_TX_SAR(priv->enet_mac_base), 0);
		enet_writel(ENET_TX_REPORT_ADDR(priv->enet_mac_base), 0);

		/* kick tx dma in case it was suspended */
		val |= TCR_EN; 
		val |= TCR_BTS(2); 
		val |= (len << 16); 
		enet_writel(ENET_TXC_CR(priv->enet_mac_base), val);

		/* no pending at this stage*/
		priv->pending_tx_cnt = 0;
	} else 
		priv->pending_tx_cnt++;

	/* if next tx descriptor is not  clean, then we have to stop
	 * queue */
	if (unlikely(--priv->free_tx_desc_count == 0))
		netif_stop_queue(dev);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
Example #19
int
dma_device_write (struct dma_device_info *dma_dev, u8 * dataptr, int len,
		  void *opt)
{
	int flag;
	u32 tmp, byte_offset;
	_dma_channel_info *pCh;
	int chan_no;
	struct tx_desc *tx_desc_p;
	local_irq_save (flag);

	pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
	chan_no = (int) (pCh - (_dma_channel_info *) dma_chan);

	tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->prev_desc;
	while (tx_desc_p->status.field.OWN == CPU_OWN
	       && tx_desc_p->status.field.C) {
		dma_dev->buffer_free ((u8 *) __va (tx_desc_p->Data_Pointer),
				      pCh->opt[pCh->prev_desc]);
		memset (tx_desc_p, 0, sizeof (struct tx_desc));
		pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
		tx_desc_p =
			(struct tx_desc *) pCh->desc_base + pCh->prev_desc;
	}
	tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc;
	/*Check whether this descriptor is available */
	if (tx_desc_p->status.field.OWN == DMA_OWN
	    || tx_desc_p->status.field.C) {
		/* if not, then tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
                local_irq_restore(flag);
		printk (KERN_INFO "%s %d: failed to write!\n", __func__,
			__LINE__);
		return 0;
	}
	pCh->opt[pCh->curr_desc] = opt;
	/* byte offset: the DMA start address must be a multiple of the burst
	 * length, so record how far dataptr sits past that boundary. */
	byte_offset = ((u32) CPHYSADDR ((u32) dataptr)) %
		      ((dma_dev->tx_burst_len) * 4);
#ifndef CONFIG_MIPS_UNCACHED
	dma_cache_wback ((unsigned long) dataptr, len);
	wmb ();
#endif //CONFIG_MIPS_UNCACHED

	tx_desc_p->Data_Pointer =
		(u32) CPHYSADDR ((u32) dataptr) - byte_offset;
	wmb ();
	tx_desc_p->status.word = (DMA_OWN << 31)
		| DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | ((byte_offset) << 23)
		| len;
	wmb ();

	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	/*Check whether this descriptor is available */
	tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc;
	if (tx_desc_p->status.field.OWN == DMA_OWN) {
		/* if not, then tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
	}
	DANUBE_DMA_RDREG_PROT (chan_no, DANUBE_DMA_CCTRL, tmp);
	if (!(tmp & 1))
		pCh->open (pCh);

	local_irq_restore (flag);
	return len;
}
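The byte_offset handling above is easiest to see with numbers; the values below are illustrative only, not taken from the driver:

	/* Worked example of the burst-alignment arithmetic, assuming
	 * tx_burst_len = 2 (8-byte bursts) and a payload at physical
	 * address 0x0123456e. */
	u32 phys        = 0x0123456e;         /* CPHYSADDR(dataptr)            */
	u32 burst_bytes = 2 * 4;              /* tx_burst_len words * 4 bytes  */
	u32 byte_offset = phys % burst_bytes; /* = 6                           */
	u32 data_ptr    = phys - byte_offset; /* = 0x01234568, burst aligned   */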
Example #20
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /*  allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own     = 1;
    reg_desc.c       = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* write descriptor to memory and write back the cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;

    dev->trans_start = jiffies;
    mailbox_signal(ndev, 1);

    adsl_led_flash();

    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}