Example #1
/*******************************************************************************
* ddr3NewTipEccScrub - Scrub the DRAM
*
* DESCRIPTION:
*       Fills the whole DRAM with a known pattern (0xdeadbeef) using XOR
*       engine channel 0, so that valid ECC is written for every location.
*
* INPUT:
*       None.
*
* OUTPUT:
*       None.
*
* RETURN:
*       None.
*
*******************************************************************************/
MV_VOID ddr3NewTipEccScrub()
{
	MV_U32 cs_c, max_cs;
	MV_U32 uiCsEna = 0;

	mvPrintf("DDR Training Sequence - Start scrubbing\n");

	max_cs = mvHwsDdr3TipMaxCSGet();
	for (cs_c = 0; cs_c < max_cs; cs_c++)
		uiCsEna |= 1 << cs_c;

	mvSysXorInit(max_cs, uiCsEna, 0x80000000, 0);

	/* Fill 0x00000000-0x7FFFFFFF with the pattern */
	mvXorMemInit(0, 0x00000000, 0x80000000, 0xdeadbeef, 0xdeadbeef);
	/* Wait for the transfer to complete */
	while (mvXorStateGet(0) != MV_IDLE)
		;

	/* Fill 0x80000000-0xBFFFFFFF with the pattern */
	mvXorMemInit(0, 0x80000000, 0x40000000, 0xdeadbeef, 0xdeadbeef);
	/* Wait for the transfer to complete */
	while (mvXorStateGet(0) != MV_IDLE)
		;

	/* Restore the XOR engine to its default state */
	mvSysXorFinish();

	mvPrintf("DDR Training Sequence - End scrubbing\n");
}
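
The scrub above hard-codes two address windows. As a minimal sketch, the same pattern generalizes to an arbitrary range; this assumes the Marvell BSP helpers mvXorMemInit()/mvXorStateGet() behave as used above, while the chunk size, pattern macro, and function name here are illustrative assumptions, not part of the original driver.

/* Hypothetical helper, not from the original driver: scrub an arbitrary
 * [base, base + size) window in fixed-size chunks on XOR channel 0.
 * SCRUB_PATTERN and SCRUB_CHUNK are illustrative assumptions. */
#define SCRUB_PATTERN	0xdeadbeef
#define SCRUB_CHUNK	0x10000000	/* 256 MB per XOR transfer (assumed) */

static MV_VOID ddr3ScrubRange(MV_U32 base, MV_U32 size)
{
	MV_U32 off, len;

	for (off = 0; off < size; off += SCRUB_CHUNK) {
		len = (size - off < SCRUB_CHUNK) ? (size - off) : SCRUB_CHUNK;

		/* Program channel 0 to fill the chunk with the pattern */
		mvXorMemInit(0, base + off, len, SCRUB_PATTERN, SCRUB_PATTERN);

		/* Busy-wait until the engine returns to idle */
		while (mvXorStateGet(0) != MV_IDLE)
			;
	}
}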
Example #2
/*=======================================================================*/
void xor_memzero(void *to, __kernel_size_t n)
{
	u32 xor_dma_unaligned_to;
	int ua = 0;
	int chan;
	struct xor_channel_t *channel;

	DPRINTK("xor_memzero(0x%x, %lu): entering\n", (u32) to, (unsigned long)n);

	if (xor_engine_initialized == 0) {
		DPRINTK(KERN_WARNING " %s: xor engines not initialized yet\n", __func__);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (!virt_addr_valid((u32) to)) {
		DPRINTK("xor_memzero(0x%x, %lu): falling back to asm_memzero\n",
			(u32) to, (unsigned long)n);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}

	/*
	 * We can only handle completely cache-line-aligned transactions
	 * with the DMA engine: the destination must be cache-line aligned
	 * AND the length must be a multiple of the cache-line size.
	 */

	/*
	 * Ok, the start address is not cache-line aligned, so we need to
	 * make it so.
	 */
	xor_dma_unaligned_to = (u32) to & 31;
	if (xor_dma_unaligned_to) {
		ua++;
		asm_memzero(to, 32 - xor_dma_unaligned_to);
		to = (void *)((u32) to + 32 - xor_dma_unaligned_to);
		n -= 32 - xor_dma_unaligned_to;
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of the cache-line size.
	 */
	xor_dma_unaligned_to = ((u32) to + n) & 31;
	if (xor_dma_unaligned_to) {
		u32 tmp_to = (u32) to + n - xor_dma_unaligned_to;

		asm_memzero((void *)tmp_to, xor_dma_unaligned_to);
		n -= xor_dma_unaligned_to;
		ua++;
	}

	/*
	 * OK! We should now be fully aligned on both ends.
	 */
	chan = allocate_channel();
	if (chan == -1) {
		DPRINTK("XOR engines are busy, return\n");
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (down_trylock(&meminit_sema)) {
		DPRINTK("meminit is used by one of the XOR engines\n");
		xor_memzero_miss++;
		free_channel(&xor_channel[chan]);
		return asm_memzero(to, n);
	}

	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
        channel = &xor_channel[chan];
	
        /* Ensure that the cache is clean */
	dmac_inv_range(to, to + n);

	channel->chan_active = 1;

	DPRINTK("setting up rest of descriptor\n");
        if( mvXorMemInit(chan, virt_to_phys(to), n, 0, 0) != MV_OK)
        {
            printk(KERN_ERR "%s: DMA memzero operation on channel %d failed. to %p len %d!\n", __func__, chan,
                to, n);
            free_channel(channel);
            up(&meminit_sema);
       	    return asm_memzero(to, n);
        }
        xor_waiton_eng(chan);


        DPRINTK("DMA memzero complete\n");
	// check to see if failed
        up(&meminit_sema);
        free_channel(channel);
	xor_memzero_hit++;
	if (ua)
		xor_memzero_unaligned++;

}
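
The core of the routine above is the head/tail trimming that leaves a cache-line-aligned middle for the DMA engine. The following standalone sketch isolates that arithmetic so it can be compiled and tested in userspace; memset() stands in for asm_memzero(), the 32-byte line size matches the "& 31" masks above, and split_for_dma() is a hypothetical name, not kernel API.

#include <stdint.h>
#include <string.h>

#define CACHE_LINE 32u	/* line size implied by the "& 31" masks above */

/* Hypothetical userspace model of the alignment logic in xor_memzero():
 * zero the unaligned head and tail on the CPU (memset stands in for
 * asm_memzero) and return the cache-line-aligned middle that would be
 * handed to the XOR/DMA engine. Assumes n >= 2 * CACHE_LINE; smaller
 * buffers should take the pure-CPU path, as the driver's fallbacks do. */
static void split_for_dma(void *to, size_t n, void **mid, size_t *mid_len)
{
	size_t head = (uintptr_t)to & (CACHE_LINE - 1);
	size_t tail;

	if (head) {
		size_t fix = CACHE_LINE - head;

		memset(to, 0, fix);		/* unaligned head: CPU path */
		to = (char *)to + fix;
		n -= fix;
	}

	tail = ((uintptr_t)to + n) & (CACHE_LINE - 1);
	if (tail) {
		memset((char *)to + n - tail, 0, tail);	/* unaligned tail */
		n -= tail;
	}

	*mid = to;		/* aligned middle, a multiple of CACHE_LINE long */
	*mid_len = n;
}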