/*******************************************************************************
* mvXorHalInit - Initialize XOR engine
*
* DESCRIPTION:
*               This function initializes the XOR unit: each channel is
*               stopped and programmed with the default control settings.
* INPUT:
*       xorChanNum - Number of XOR channels to initialize.
*
* OUTPUT:
*       None.
*
* RETURN:
*       None.
*******************************************************************************/
GT_VOID mvXorHalInit(GT_U32 xorChanNum)
{
	GT_U32 i;
	/* Abort any XOR activity & set default configuration */
	for (i = 0; i < xorChanNum; i++) {
		mvXorCommandSet(i, MV_STOP);
		mvXorCtrlSet(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
			     (4 << XEXCR_DST_BURST_LIMIT_OFFS) | (4 << XEXCR_SRC_BURST_LIMIT_OFFS)
#if defined(MV_CPU_BE)
/*				| (1 << XEXCR_DRD_RES_SWP_OFFS)
				| (1 << XEXCR_DWR_REQ_SWP_OFFS)
*/
			     | (1 << XEXCR_DES_SWP_OFFS)
#endif
		    );
	}

}
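
For reference, a minimal call-site sketch follows. MV_XOR_MAX_CHAN and boardXorInit() are illustrative assumptions only; neither is defined in the example above.

/* Hypothetical bring-up call: stop and reconfigure every XOR channel once
 * during board init. MV_XOR_MAX_CHAN is an assumed platform constant. */
#define MV_XOR_MAX_CHAN 2

GT_VOID boardXorInit(GT_VOID)
{
	mvXorHalInit(MV_XOR_MAX_CHAN);
}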
Example #2
/*******************************************************************************
* mvXorHalInit - Initialize XOR engine
*
* DESCRIPTION:
*               This function initializes all channels of the given XOR unit:
*               each channel is stopped and programmed with the default
*               control settings.
* INPUT:
*       unit - XOR unit number whose channels are initialized.
*
* OUTPUT:
*       None.
*
* RETURN:
*       None.
*******************************************************************************/
MV_VOID mvXorHalInit(MV_U32 unit)
{
	MV_U32 i;
	MV_U32 maxChan;

	/* Abort any XOR activity & set default configuration */
	/* loop over unit channels */
	maxChan = (MV_XOR_MAX_CHAN_PER_UNIT * unit) + MV_XOR_MAX_CHAN_PER_UNIT;
	for (i = (MV_XOR_MAX_CHAN_PER_UNIT * unit); i < maxChan; i++) {
		mvXorCommandSet(i, MV_STOP);
		mvXorCtrlSet(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
			     (4 << XEXCR_DST_BURST_LIMIT_OFFS) | (4 << XEXCR_SRC_BURST_LIMIT_OFFS)
#if defined(MV_CPU_BE)
/*				| (1 << XEXCR_DRD_RES_SWP_OFFS)
				| (1 << XEXCR_DWR_REQ_SWP_OFFS)
*/
			     | (1 << XEXCR_DES_SWP_OFFS)
#endif
		    );
	}

}
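
This variant is parameterized by the XOR unit rather than a channel count, so a caller would typically loop over the units on the SoC. A hedged sketch follows; MV_XOR_MAX_UNIT and boardXorInit() are illustrative assumptions, not taken from the example above.

/* Hypothetical bring-up loop over all XOR units; MV_XOR_MAX_UNIT is an
 * assumed platform constant. */
#define MV_XOR_MAX_UNIT 2

MV_VOID boardXorInit(MV_VOID)
{
	MV_U32 unit;

	for (unit = 0; unit < MV_XOR_MAX_UNIT; unit++)
		mvXorHalInit(unit);
}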
Example #3
int mv_xor_init(void)
{
    int chan;
#ifdef CONFIG_ENABLE_XOR_INTERRUPTS
    int err = 0;
#endif
    char *mode = "acceleration";

#ifdef CONFIG_ENABLE_XOR_INTERRUPTS
    mode = "offloading";
#endif
    
    printk(KERN_INFO "Use the XOR engines (%s) for enhancing the following functions:\n", mode);
#ifdef CONFIG_MV_RAID5_XOR_OFFLOAD
    printk(KERN_INFO "  o RAID 5 Xor calculation\n");
#endif
#ifdef CONFIG_MV_XORMEMCOPY
    printk(KERN_INFO "  o kernel memcpy\n");
#endif
#ifdef CONFIG_MV_XORMEMZERO
    printk(KERN_INFO "  o kenrel memzero\n");
#endif
#ifdef CONFIG_MV_USE_XOR_FOR_COPY_USER_BUFFERS
    printk(KERN_INFO "  o copy user to/from kernel buffers\n");
#endif
    printk(KERN_INFO "Number of XOR engines to use: %d\n", XOR_MAX_CHANNELS);

    if(mvCtrlModelGet() == MV_5082_DEV_ID)
    {
        printk(KERN_WARNING " This device doesn't have XOR engines.\n");    
        return -ENODEV;
    }
    mvXorInit();

    /* pre-alloc XOR descriptors */
    pDescriptors = dma_alloc_coherent(NULL, sizeof(MV_XOR_DESC) * XOR_MAX_CHANNELS,
                                            &descsPhyAddr, GFP_KERNEL);  
    if(pDescriptors == NULL)
    {
        printk(KERN_ERR "%s: failed to allocate XOR descriptors\n", __func__);
        return -ENOMEM;
    }
    sema_init(&meminit_sema, 1);
    memset(pDescriptors, 0, sizeof(MV_XOR_DESC) * XOR_MAX_CHANNELS);
    DPRINTK(" allocating XOR Descriptors: virt add %p, phys addr %x\n", 
        pDescriptors, descsPhyAddr);
    for(chan = 0; chan  < XOR_MAX_CHANNELS; chan++)
    {
        xor_channel[chan].chan_num = chan;
        xor_channel[chan].pDescriptor = pDescriptors + chan;
        xor_channel[chan].descPhyAddr = descsPhyAddr + (sizeof(MV_XOR_DESC) * chan);
        xor_channel[chan].chan_active = 0;

        sema_init(&xor_channel[chan].sema, 1);
        init_waitqueue_head(&xor_channel[chan].waitq);
        mvXorCtrlSet(chan, (1 << XEXCR_REG_ACC_PROTECT_OFFS) | 
                    (4 << XEXCR_DST_BURST_LIMIT_OFFS) |
                    (4 << XEXCR_SRC_BURST_LIMIT_OFFS));
#ifdef CONFIG_ENABLE_XOR_INTERRUPTS
        switch(chan)
        {
            case 0:
                xor_channel[chan].irq_num = XOR0_IRQ_NUM;
                xor_channel[chan].name = "xor_chan0";
                break;
            case 1:
                xor_channel[chan].irq_num = XOR1_IRQ_NUM;
                xor_channel[chan].name = "xor_chan1";
            break;
            default:
                printk(KERN_ERR "%s: trying to configure bad xor channel\n", __func__);
                return -ENXIO; 
        }
        err = request_irq(xor_channel[chan].irq_num, mv_xor_isr, SA_INTERRUPT,
				  xor_channel[chan].name, (void *)chan);
        if (err < 0)
        {
            printk(KERN_ERR "%s: unable to request IRQ %d for "
                            "XOR %d: %d\n", __func__, XOR0_IRQ_NUM, chan, err);
        	return -EBUSY;
        }
        MV_REG_WRITE(XOR_MASK_REG, 0xFFEFFFEF);
#endif
    }
#ifdef CONFIG_PROC_FS
    xor_read_proc_entry = create_proc_entry("mv_xor", S_IFREG | S_IRUGO, 0);
    if (xor_read_proc_entry != NULL) {
        xor_read_proc_entry->read_proc = xor_read_proc;
        xor_read_proc_entry->write_proc = NULL;
        xor_read_proc_entry->nlink = 1;
    }
#endif
    xor_engine_initialized = 1;
    return 0;
}
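
In a kernel build, an init routine like this is normally registered through the standard module entry points. The following is a sketch under that assumption; mv_xor_exit() is a hypothetical teardown counterpart (freeing the IRQs, the coherent descriptor block and the proc entry) that is not part of the example above.

#include <linux/module.h>

/* Hypothetical module hookup; mv_xor_exit() is assumed, not shown in the
 * example above. */
static void __exit mv_xor_exit(void)
{
    /* teardown (free_irq, dma_free_coherent, remove_proc_entry) omitted */
}

module_init(mv_xor_init);
module_exit(mv_xor_exit);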