/**
 * mpq_dmx_sw_plugin_init() - module entry point for the software demux plugin.
 *
 * Registers the software demux with the generic MPQ demux plugin core.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __init mpq_dmx_sw_plugin_init(void)
{
	return mpq_dmx_plugin_init(mpq_sw_dmx_init);
}
/**
 * mpq_dmx_tsif_plugin_init() - Module initialization function.
 *
 * Validates the module parameters, spawns one worker thread per TSIF
 * instance and registers the TSIF demux plugin.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __init mpq_dmx_tsif_plugin_init(void)
{
	int i;
	int ret;

	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);

	/* check module parameters validity */
	if (threshold < 1) {
		MPQ_DVB_ERR_PRINT(
			"%s: invalid threshold parameter, using %d instead\n",
			__func__, DMX_TSIF_PACKETS_IN_CHUNK_DEF);
		threshold = DMX_TSIF_PACKETS_IN_CHUNK_DEF;
	}
	if ((tsif_mode < 1) || (tsif_mode > 3)) {
		MPQ_DVB_ERR_PRINT(
			"%s: invalid mode parameter, using %d instead\n",
			__func__, DMX_TSIF_DRIVER_MODE_DEF);
		tsif_mode = DMX_TSIF_DRIVER_MODE_DEF;
	}

	for (i = 0; i < TSIF_COUNT; i++) {
		snprintf(mpq_dmx_tsif_info.tsif[i].name,
			TSIF_NAME_LENGTH,
			"dmx_tsif%d", i);

		atomic_set(&mpq_dmx_tsif_info.tsif[i].data_cnt, 0);
		init_waitqueue_head(&mpq_dmx_tsif_info.tsif[i].wait_queue);

		/*
		 * Fully initialize per-instance state (including the mutex)
		 * BEFORE starting the worker thread, so the thread can never
		 * observe an uninitialized mutex. The previous code called
		 * mutex_init() only after kthread_run(), which was racy.
		 */
		mutex_init(&mpq_dmx_tsif_info.tsif[i].mutex);
		mpq_dmx_tsif_info.tsif[i].tsif_driver.tsif_handler = NULL;
		mpq_dmx_tsif_info.tsif[i].ref_count = 0;

		/* cast via uintptr_t: int -> pointer must go through an
		 * integer type wide enough to hold a pointer on 64-bit.
		 */
		mpq_dmx_tsif_info.tsif[i].thread =
			kthread_run(
				mpq_dmx_tsif_thread,
				(void *)(uintptr_t)i,
				mpq_dmx_tsif_info.tsif[i].name);

		if (IS_ERR(mpq_dmx_tsif_info.tsif[i].thread)) {
			int j;

			/* propagate the real error instead of -ENOMEM */
			ret = PTR_ERR(mpq_dmx_tsif_info.tsif[i].thread);

			mutex_destroy(&mpq_dmx_tsif_info.tsif[i].mutex);
			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tsif_info.tsif[j].thread);
				mutex_destroy(&mpq_dmx_tsif_info.tsif[j].mutex);
			}

			MPQ_DVB_ERR_PRINT(
				"%s: kthread_run failed\n",
				__func__);

			return ret;
		}
	}

	ret = mpq_dmx_plugin_init(mpq_tsif_dmx_init);
	if (ret < 0) {
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
			__func__, ret);

		/* unwind every instance started above */
		for (i = 0; i < TSIF_COUNT; i++) {
			kthread_stop(mpq_dmx_tsif_info.tsif[i].thread);
			mutex_destroy(&mpq_dmx_tsif_info.tsif[i].mutex);
		}
	}

	return ret;
}
/**
 * mpq_dmx_tspp_plugin_init() - Module initialization function.
 *
 * Allocates per-TSIF buffer-descriptor aggregation arrays, initializes
 * per-instance state, spawns one worker thread per TSIF instance and
 * registers the TSPP demux plugin. On any failure, everything set up so
 * far is unwound before returning.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __init mpq_dmx_tspp_plugin_init(void)
{
	int i;
	int j;
	int ret;

	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);

	for (i = 0; i < TSIF_COUNT; i++) {
		mpq_dmx_tspp_info.tsif[i].buffer_count =
			TSPP_BUFFER_COUNT(tspp_out_buffer_size);

		mpq_dmx_tspp_info.tsif[i].aggregate_ids =
			vzalloc(mpq_dmx_tspp_info.tsif[i].buffer_count *
				sizeof(int));
		if (NULL == mpq_dmx_tspp_info.tsif[i].aggregate_ids) {
			MPQ_DVB_ERR_PRINT(
				"%s: Failed to allocate memory for buffer descriptors aggregation\n",
				__func__);

			/* unwind previously initialized instances */
			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
				vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids);
				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
			}
			return -ENOMEM;
		}

		mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
		mpq_dmx_tspp_info.tsif[i].buff_index = 0;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
		atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);

		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
		}

		snprintf(mpq_dmx_tspp_info.tsif[i].name,
				TSIF_NAME_LENGTH,
				"dmx_tsif%d", i);

		init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);

		/*
		 * Initialize the mutex BEFORE starting the worker thread,
		 * so the thread can never observe an uninitialized mutex.
		 * The previous code called mutex_init() only after
		 * kthread_run(), which was racy.
		 */
		mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);

		/* cast via uintptr_t: int -> pointer must go through an
		 * integer type wide enough to hold a pointer on 64-bit.
		 */
		mpq_dmx_tspp_info.tsif[i].thread =
			kthread_run(
				mpq_dmx_tspp_thread,
				(void *)(uintptr_t)i,
				mpq_dmx_tspp_info.tsif[i].name);

		if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
			/* propagate the real error instead of -ENOMEM */
			ret = PTR_ERR(mpq_dmx_tspp_info.tsif[i].thread);

			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
			vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);

			for (j = 0; j < i; j++) {
				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
				vfree(mpq_dmx_tspp_info.tsif[j].aggregate_ids);
				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
			}

			MPQ_DVB_ERR_PRINT(
				"%s: kthread_run failed\n",
				__func__);

			return ret;
		}
	}

	ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);
	if (ret < 0) {
		MPQ_DVB_ERR_PRINT(
			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
			__func__, ret);

		/* unwind every instance started above */
		for (i = 0; i < TSIF_COUNT; i++) {
			kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
			vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
		}
	}

	return ret;
}
/**
 * mpq_dmx_tspp_plugin_init() - module entry point for the TSPP demux plugin.
 *
 * Logs entry and registers the TSPP demux with the generic MPQ demux
 * plugin core.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __init mpq_dmx_tspp_plugin_init(void)
{
	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);

	return mpq_dmx_plugin_init(mpq_tspp_dmx_init);
}