/**
 *  ifx_rcu_init - Initialize the RCU driver
 *
 *  This function initializes the internal data structures of the driver,
 *  creates the proc file entry and registers the character device.
 *
 *      Return Value:
 *  @IFX_SUCCESS on success, otherwise the error code returned by register_chrdev.
 */
static int __init ifx_rcu_init(void)
{
    int ret;
    char ver_str[64];

    /* register port device */
    ret = register_chrdev(IFX_RCU_MAJOR, "ifx_rcu", &g_rcu_fops);
    if ( ret != 0 ) {
        err("Can not register RCU device - %d", ret);
        return ret;
    }

    proc_file_create();

    ifx_rcu_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    return IFX_SUCCESS;
}
Example #2
int
danube_dma_init (void)
{
	int result = 0;
	int i;
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
	int cnt=0;
	void ** p = NULL;
#endif
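	/* register the character device interface of the DMA core */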
	result = register_chrdev (DMA_MAJOR, "dma-core", &dma_fops);
	if (result) {
		DANUBE_DMA_EMSG ("cannot register device dma-core!\n");
		return result;
	}
	danube_dma_sem =
		(struct semaphore *) kmalloc (sizeof (struct semaphore),
					      GFP_KERNEL);
	if (danube_dma_sem == NULL) {
		unregister_chrdev (DMA_MAJOR, "dma-core");
		return -ENOMEM;
	}
	init_MUTEX (danube_dma_sem);
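	/* bring up the DMA controller and apply the default channel mapping */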
	dma_chip_init ();
	map_dma_chan (default_dma_map);
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
	//customize the descriptor ring length per channel
	for(i=0;i<MAX_DMA_CHANNEL_NUM;i++) {
		dma_chan[i].desc_len= DANUBE_DMA_DESCRIPTOR_OFFSET;
		if ((i == 6) || (i == 7)) {
			//RX/TX channels for Eth0/Eth1
			dma_chan[i].desc_len= DMA_ETH_NUM_DESCRS;
		}
		cnt += dma_chan[i].desc_len;
		p = (void **) kmalloc(dma_chan[i].desc_len * sizeof(void *), GFP_DMA);
		if (p == NULL){
			DANUBE_DMA_EMSG("no memory for desriptor opt\n");
			goto dma_init_no_memory_err_exit;
		}
		dma_chan[i].opt = p;
	}
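	/* allocate one contiguous list large enough for the descriptors of all channels (cnt entries) */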
	g_desc_list=(u64*)kmalloc(cnt * sizeof(u64), GFP_DMA);
	if (g_desc_list == NULL){
		DANUBE_DMA_EMSG("no memory for desriptor\n");
		goto dma_init_no_memory_err_exit;
	}
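	/*
	 * Keep the original cached pointer in g_desc_list_backup, switch the
	 * working pointer to its uncached KSEG1 alias (| 0xA0000000) and
	 * invalidate any stale cache lines before zeroing the descriptors.
	 */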
	g_desc_list_backup = g_desc_list;
	g_desc_list = (u64*)((u32)g_desc_list | 0xA0000000);
	dma_cache_inv(g_desc_list_backup, cnt * sizeof(u64));
	memset(g_desc_list, 0, cnt * sizeof(u64));
	cnt=0;
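	/* hand each channel its slice of the descriptor list and program the channel base/length registers */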
	for(i=0;i<MAX_DMA_CHANNEL_NUM;i++)
	{
		dma_chan[i].desc_base=(u32)g_desc_list+cnt*sizeof(u64);
		dma_chan[i].curr_desc=0;
		cnt+=dma_chan[i].desc_len;
		select_chan(i);
		*DANUBE_DMA_CDBA=(u32)CPHYSADDR(dma_chan[i].desc_base);
		*DANUBE_DMA_CDLEN=dma_chan[i].desc_len;
	}
#else
//	g_desc_list = (u64 *) (__get_free_page (GFP_DMA));
	g_desc_list = (u64 *) kmalloc (DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof (u64), GFP_DMA);
	if (g_desc_list == NULL) {
		DANUBE_DMA_EMSG ("no memory for descriptor\n");
		return -ENOMEM;
	}
//	dma_cache_inv(g_desc_list, PAGE_SIZE);
	dma_cache_inv (g_desc_list, DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof (u64));
	g_desc_list_backup = g_desc_list;
//	g_desc_list = KSEG1ADDR(g_desc_list);
	g_desc_list = (u64 *) ((u32) g_desc_list | 0xA0000000);
//	memset (g_desc_list, 0, PAGE_SIZE);
	memset (g_desc_list, 0, DANUBE_DMA_DESCRIPTOR_OFFSET * MAX_DMA_CHANNEL_NUM * sizeof (u64));
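	/* fixed-size rings: each channel gets DANUBE_DMA_DESCRIPTOR_OFFSET descriptors out of the shared list */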
	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
		dma_chan[i].desc_base =
			(u32) g_desc_list +
			i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8;
		dma_chan[i].curr_desc = 0;
		dma_chan[i].desc_len = DANUBE_DMA_DESCRIPTOR_OFFSET;
		select_chan (i);
		*DANUBE_DMA_CDBA = (u32) CPHYSADDR (dma_chan[i].desc_base);
		*DANUBE_DMA_CDLEN = dma_chan[i].desc_len;
	}
#endif
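
	/*
	 * Export read-only debug views of the DMA registers, the descriptor
	 * list and the channel weights under /proc/danube_dma.
	 */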

	g_danube_dma_dir = proc_mkdir ("danube_dma", NULL);

	create_proc_read_entry ("dma_register",
				0,
				g_danube_dma_dir,
				dma_register_proc_read, NULL);

	create_proc_read_entry ("g_desc_list",
				0,
				g_danube_dma_dir, desc_list_proc_read, NULL);

	create_proc_read_entry ("channel_weight",
				0,
				g_danube_dma_dir,
				channel_weight_proc_read, NULL);
	proc_file_create();
	return 0;
	//pliu: 2007021201
#ifdef TWEAK_DMA_BUFFER_RING_SIZE
dma_init_no_memory_err_exit:
	for (i=0;i<MAX_DMA_CHANNEL_NUM;i++){
		if (dma_chan[i].opt){
			kfree(dma_chan[i].opt);
		}
	}
	return -ENOMEM;
#endif
}