int cx23888_ir_probe(struct cx23885_dev *dev)
{
    struct cx23888_ir_state *state;
    struct v4l2_subdev *sd;
    struct v4l2_subdev_ir_parameters default_params;
    int ret;

    state = kzalloc(sizeof(struct cx23888_ir_state), GFP_KERNEL);
    if (state == NULL)
        return -ENOMEM;

    spin_lock_init(&state->rx_kfifo_lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
    state->rx_kfifo = kfifo_alloc(CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL,
                                  &state->rx_kfifo_lock);
    if (state->rx_kfifo == NULL) {
        kfree(state);
        return -ENOMEM;
    }
#else
    if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) {
        kfree(state);
        return -ENOMEM;
    }
#endif

    state->dev = dev;
    state->id = V4L2_IDENT_CX23888_IR;
    state->rev = 0;
    sd = &state->sd;

    v4l2_subdev_init(sd, &cx23888_ir_controller_ops);
    v4l2_set_subdevdata(sd, state);
    /* FIXME - fix the formatting of dev->v4l2_dev.name and use it */
    snprintf(sd->name, sizeof(sd->name), "%s/888-ir", dev->name);
    sd->grp_id = CX23885_HW_888_IR;

    ret = v4l2_device_register_subdev(&dev->v4l2_dev, sd);
    if (ret == 0) {
        /*
         * Ensure no interrupts arrive from '888 specific conditions,
         * since we ignore them in this driver to have commonality with
         * similar IR controller cores.
         */
        cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0);

        mutex_init(&state->rx_params_lock);
        memcpy(&default_params, &default_rx_params,
               sizeof(struct v4l2_subdev_ir_parameters));
        v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);

        mutex_init(&state->tx_params_lock);
        memcpy(&default_params, &default_tx_params,
               sizeof(struct v4l2_subdev_ir_parameters));
        v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
    } else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
        kfifo_free(state->rx_kfifo);
#else
        kfifo_free(&state->rx_kfifo);
#endif
    }
    return ret;
}
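The #if branches above exist because the kfifo API changed in Linux 2.6.33: before that, kfifo_alloc() returned a pointer to a newly allocated struct kfifo and required a caller-supplied spinlock; from 2.6.33 on, the caller embeds the struct kfifo and kfifo_alloc() returns 0 or a negative errno. Below is a minimal sketch of both call patterns, assuming mainline behaviour (some backport compatibility layers differ); the names my_fifo, my_lock, FIFO_BYTES and the init functions are illustrative only.

/* Minimal sketch of the two kfifo_alloc() generations; illustrative names. */
#include <linux/version.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#define FIFO_BYTES 128

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
/* Old API: kfifo_alloc() returns a struct kfifo * (ERR_PTR() on failure in
 * mainline) and stores a caller-provided spinlock inside the fifo. */
static struct kfifo *my_fifo;
static spinlock_t my_lock;

static int my_fifo_init_old(void)
{
    spin_lock_init(&my_lock);
    my_fifo = kfifo_alloc(FIFO_BYTES, GFP_KERNEL, &my_lock);
    if (IS_ERR(my_fifo))
        return PTR_ERR(my_fifo);
    return 0;                    /* later: kfifo_free(my_fifo); */
}
#else
/* New API (>= 2.6.33): the struct kfifo is embedded in the caller's data
 * and kfifo_alloc() returns 0 or a negative errno; locking is up to the
 * caller. */
static struct kfifo my_fifo;

static int my_fifo_init_new(void)
{
    int ret = kfifo_alloc(&my_fifo, FIFO_BYTES, GFP_KERNEL);

    if (ret)
        return ret;
    return 0;                    /* later: kfifo_free(&my_fifo); */
}
#endif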
static int stp_uart_fifo_init(void)
{
    int err = 0;

    /* add rx fifo */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
    spin_lock_init(&g_stp_uart_rx_fifo_spinlock);
    g_stp_uart_rx_fifo = kfifo_alloc(LDISC_RX_FIFO_SIZE, GFP_ATOMIC,
                                     &g_stp_uart_rx_fifo_spinlock);
    if (NULL == g_stp_uart_rx_fifo) {
        UART_ERR_FUNC("kfifo_alloc failed (kernel version < 2.6.35)\n");
        err = -1;
    }
#else
    g_stp_uart_rx_fifo = kzalloc(sizeof(struct kfifo), GFP_ATOMIC);
    if (NULL == g_stp_uart_rx_fifo) {
        err = -2;
        UART_ERR_FUNC("kzalloc for g_stp_uart_rx_fifo failed (kernel version >= 2.6.35)\n");
    } else {
        err = kfifo_alloc(g_stp_uart_rx_fifo, LDISC_RX_FIFO_SIZE, GFP_ATOMIC);
        if (0 != err) {
            UART_ERR_FUNC("kfifo_alloc failed, errno(%d) (kernel version >= 2.6.35)\n", err);
            kfree(g_stp_uart_rx_fifo);
            g_stp_uart_rx_fifo = NULL;
            err = -3;
        }
    }
#endif

    if (0 == err) {
        if (NULL != g_stp_uart_rx_fifo) {
            kfifo_reset(g_stp_uart_rx_fifo);
            UART_ERR_FUNC("stp_uart_fifo_init() success.\n");
        } else {
            err = -4;
            UART_ERR_FUNC("abnormal case, err = 0 but g_stp_uart_rx_fifo = NULL, set err to %d\n", err);
        }
    } else {
        UART_ERR_FUNC("stp_uart_fifo_init() failed.\n");
    }

#if 0
    spin_lock_init(&g_stp_uart_rx_handling_lock);
#endif

    return err;
}
/**
 * Creates two kernel FIFO queues and runs each of them in its own thread.
 */
int __init init_fifo_test(void)
{
    printk("fifo start\n");

    filp1 = filp_open("/tmp/read_sense", O_WRONLY, S_IRUSR|S_IWUSR);
    if (IS_ERR(filp1)) {
        const int open_errno = -PTR_ERR(filp1);
        printk("fifo1 open error: %s errno=%d\n", __FUNCTION__, open_errno);
        return 0;
    } else {
        printk("fifo1 open success\n");
    }

    filp = filp_open("/tmp/read_sense2", O_WRONLY, S_IRUSR|S_IWUSR);
    if (IS_ERR(filp)) {
        const int open_errno = -PTR_ERR(filp);
        printk("fifo open error: %s errno=%d\n", __FUNCTION__, open_errno);
        return 0;
    } else {
        printk("fifo open success\n");
    }

    printk("fifo1 module insert-----------\n");
    if (kfifo_alloc(&fifo1, FIFOSIZE, GFP_KERNEL)) {
        printk(KERN_WARNING "fifo1 error kfifo_alloc1\n");
        return -ENOMEM;
    }

    printk("fifo module insert-----------\n");
    if (kfifo_alloc(&fifo, FIFOSIZE, GFP_KERNEL)) {
        printk(KERN_WARNING "fifo error kfifo_alloc\n");
        kfifo_free(&fifo1);
        return -ENOMEM;
    }

    printk(KERN_INFO "queue1 size : %u\n", kfifo_size(&fifo1));
    printk(KERN_INFO "queue size : %u\n", kfifo_size(&fifo));
    printk(KERN_INFO "queue1 available : %u\n", kfifo_avail(&fifo1));
    printk(KERN_INFO "queue available : %u\n", kfifo_avail(&fifo));

#if 1
    printk(KERN_INFO "thread_start1\n");
    t_id1 = kthread_run(thread_loop1, NULL, "%s", "create_test1");

    printk(KERN_INFO "thread_start\n");
    t_id = kthread_run(thread_loop, NULL, "%s", "create_test");
#endif

    return 0;
}
static int client_init(emd_dev_client_t *client, int major, int sub_id)
{
    int ret = 0;

    if ((sub_id >= EMD_CHR_CLIENT_NUM) || (sub_id < 0)) {
        EMD_MSG_INF("chr", "client_init:sub_id(%d) error\n", sub_id);
        return -1;
    }
    // 1. Clear client
    memset(client, 0, sizeof(emd_dev_client_t));
    // 2. Set device id
    client->major_dev_id = major;
    client->sub_dev_id = sub_id;
    // 3. Init wait queue head, spin lock and mutex
    init_waitqueue_head(&client->wait_q);
    spin_lock_init(&client->lock);
    mutex_init(&client->emd_mutex);
    // 4. Set user_num to zero
    client->user_num = 0;
    // 5. Alloc and init kfifo
    ret = kfifo_alloc(&client->fifo, EMD_MAX_MESSAGE_NUM * sizeof(int), GFP_ATOMIC);
    if (ret) {
        EMD_MSG_INF("chr", "kfifo alloc failed(ret=%d).\n", ret);
        return ret;
    }
    EMD_MSG_INF("chr", "client_init:sub_id=%d\n", client->sub_dev_id);
    return 0;
}
static __init int dccpprobe_init(void)
{
    int ret = -ENOMEM;

    init_waitqueue_head(&dccpw.wait);
    spin_lock_init(&dccpw.lock);
    dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock);
    if (IS_ERR(dccpw.fifo))
        return PTR_ERR(dccpw.fifo);

    if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops))
        goto err0;

    ret = register_jprobe(&dccp_send_probe);
    if (ret)
        goto err1;

    pr_info("DCCP watch registered (port=%d)\n", port);
    return 0;
err1:
    proc_net_remove(procname);
err0:
    kfifo_free(dccpw.fifo);
    return ret;
}
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct input_dev *input_dev)
{
    struct ir_input_dev *ir = input_get_drvdata(input_dev);
    int rc;
    struct ir_raw_handler *handler;

    ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
    if (!ir->raw)
        return -ENOMEM;

    ir->raw->input_dev = input_dev;
    INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
    ir->raw->enabled_protocols = ~0;
    rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
                     GFP_KERNEL);
    if (rc < 0) {
        kfree(ir->raw);
        ir->raw = NULL;
        return rc;
    }

    spin_lock(&ir_raw_handler_lock);
    list_add_tail(&ir->raw->list, &ir_raw_client_list);
    list_for_each_entry(handler, &ir_raw_handler_list, list)
        if (handler->raw_register)
            handler->raw_register(ir->raw->input_dev);
    spin_unlock(&ir_raw_handler_lock);

    return 0;
}
static int netconsole_init(void)
{
    struct nc_priv *priv;
    struct console_device *cdev;

    priv = xzalloc(sizeof(*priv));
    cdev = &priv->cdev;
    cdev->tstc = nc_tstc;
    cdev->putc = nc_putc;
    cdev->getc = nc_getc;

    g_priv = priv;

    priv->fifo = kfifo_alloc(1024);

    console_register(cdev);

    dev_add_param(&cdev->class_dev, "ip", nc_remoteip_set, NULL, 0);
    dev_add_param(&cdev->class_dev, "port", nc_port_set, NULL, 0);
    dev_set_param(&cdev->class_dev, "port", "6666");

    printf("registered netconsole as %s%d\n", cdev->class_dev.name, cdev->class_dev.id);

    return 0;
}
static int nullmodem_open(struct tty_struct *tty, struct file *file)
{
    struct nullmodem_pair *pair = &pair_table[tty->index/2];
    struct nullmodem_end *end = ((tty->index&1) ? &pair->b : &pair->a);
    unsigned long flags;
    int index;
    int err = -ENOMEM;

    dprintf("%s - #%d c:%d\n", __FUNCTION__, tty->index, tty->count);

    if (tty->count > 1)
        return 0;

    index = tty->index;
    tport[index].tty = tty;
    tty->port = &tport[index];

    /* kfifo_alloc() with GFP_KERNEL may sleep, so allocate before taking
     * the spinlock. */
    if (kfifo_alloc(&end->fifo, TX_BUF_SIZE, GFP_KERNEL))
        return err;

    spin_lock_irqsave(&end->pair->spin, flags);
    tty->driver_data = end;
    end->tty = tty;
    end->nominal_bit_count = 0;
    end->actual_bit_count = 0;
    handle_termios(tty);
    err = 0;
    spin_unlock_irqrestore(&end->pair->spin, flags);
    return err;
}
static int dv_spi_init(void)
{
    int result;

    /* Registering device */
    result = register_chrdev(MAJOR_VERSION, "spi", &dv_spi_fops);
    if (result < 0) {
        printk("\ndv_spi: cannot obtain major number %d\n", MAJOR_VERSION);
        return result;
    }

    // Allocate space for the read buffer
    g_readbuf = kmalloc(MAX_BUF_SIZE, GFP_KERNEL);
    if (!g_readbuf) {
        result = -ENOMEM;
        dv_spi_exit();
        return result;
    }

    g_kfifo = kfifo_alloc(MAX_BUF_SIZE, GFP_KERNEL, &g_spinlock);
    if (IS_ERR_OR_NULL(g_kfifo)) {  /* the old kfifo_alloc() reports failure via ERR_PTR() */
        result = -ENOMEM;
        dv_spi_exit();
        return result;
    }

    printk("\nInserting SPI module\n");
    return 0;
}
int main(void)
{
    struct kfifo fck;
    struct kfifo *fifo = &fck;
    int tmp, array[256];
    int out[256];
    int i;

    tmp = kfifo_alloc(fifo, 256, sizeof(int));
    printf("\nkfifo_alloc ret = %d\n", tmp);
    kfifo_disp_info(fifo);

    for (i = 0; i < ARRAY_SIZE(array); i++)
        array[i] = i;

    // for(i = 0; i < 100; i++)
    tmp = kfifo_in(fifo, array, 100);
    printf("\nkfifo_in ret = %d\n", tmp);
    kfifo_disp_info(fifo);

    tmp = kfifo_out(fifo, out, 10);
    printf("\nkfifo_out ret = %d\n", tmp);
    kfifo_disp_info(fifo);

    for (i = 0; i < 10; i++)
        printf("out[%d]= %d\n", i, out[i]);

    kfifo_free(fifo);
    return 0;
}
int test(void)
{
    struct kfifo fifo;
    int ret, tam;
    char buf[16];

    /* Initialise the queue and check for errors */
    ret = kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL);
    if (ret) {
        printk(KERN_ERR "error kfifo_alloc\n");
        return ret;
    }
    /* Put the string "Hello" into the queue */
    kfifo_in(&fifo, "Hello", 5);
    /* Put the string "LIN" into the queue */
    kfifo_in(&fifo, "LIN", 3);
    /* Take the string "Hello" out of the queue */
    tam = kfifo_out(&fifo, buf, 5);
    buf[tam] = '\0';
    printk(KERN_INFO "%s", buf);
    /* Read back the string "LIN" without removing it from the queue */
    tam = kfifo_out_peek(&fifo, buf, 3);
    if (tam) {
        buf[tam] = '\0';
        printk(KERN_INFO " %s\n", buf);
    }
    /* Destroy the queue */
    kfifo_free(&fifo);
    return 0;
}
int hidpp_init(struct hidpp_device *hidpp_dev, struct hid_device *hid_dev)
{
    if (hidpp_dev->initialized)
        return 0;

    hidpp_dev->init_retry = 0;
    hidpp_dev->hid_dev = hid_dev;
    hidpp_dev->initialized = 1;

    INIT_WORK(&hidpp_dev->work, delayed_work_cb);
    mutex_init(&hidpp_dev->send_mutex);
    init_waitqueue_head(&hidpp_dev->wait);
    spin_lock_init(&hidpp_dev->lock);
    if (kfifo_alloc(&hidpp_dev->delayed_work_fifo,
                    4 * sizeof(struct hidpp_report), GFP_KERNEL)) {
        dev_err(&hidpp_dev->hid_dev->dev,
                "%s:failed allocating delayed_work_fifo\n", __func__);
        mutex_destroy(&hidpp_dev->send_mutex);
        return -ENOMEM;
    }

    return 0;
}
int eemcs_sysmsg_mod_init(void)
{
    int ret = 0;

    DBGLOG(SMSG, DBG, "====> %s", FUNC_NAME);

    ret = kfifo_alloc(&sysmsg_fifo, sizeof(unsigned) * CCCI_SYSMSG_MAX_REQ_NUM, GFP_KERNEL);
    if (ret) {
        DBGLOG(SMSG, ERR, "kfifo_alloc fail: %d", ret);
        return ret;
    }

    //related channel registration.
    //RX
    KAL_ASSERT(ccci_ch_register((CCCI_CHANNEL_T)CH_SYS_RX, eemcs_sysmsg_rx_dispatch_cb, 0) == KAL_SUCCESS);
    //TX
    eemcs_register_sys_msg_notify_func(MD_SYS5, send_eemcs_system_ch_msg);

    INIT_WORK(&sysmsg_work, eemcs_sysmsg_work);

#ifdef __EEMCS_SYSMSG_IT__
    eemcs_register_ccci_sys_call_back(MD_SYS5, TEST_MSG_ID_MD2AP, eemcs_sysmsg_echo_test);
#endif

    //DBGLOG(SMSG, TRA, "register sys msg callback: md_get_battery_info");
    eemcs_register_ccci_sys_call_back(MD_SYS5, EXT_MD_GET_BATTERY_INFO,
                                      eemcs_md_get_battery_info); //EXT_MD_GET_BATTERY_INFO == MD_GET_BATTERY_INFO == 0x105
    eemcs_register_ccci_sys_call_back(MD_SYS5, EXT_MD_SIM_TYPE, set_sim_type);

    DBGLOG(SMSG, DBG, "<==== %s", FUNC_NAME);
    return 0;
}
static int _btif_tx_fifo_init(P_MTK_BTIF_INFO_STR p_btif_info)
{
    int i_ret = -1;

    spin_lock_init(&(p_btif_info->tx_fifo_spinlock));

    if (NULL == p_btif_info->p_tx_fifo) {
        p_btif_info->p_tx_fifo = kzalloc(sizeof(struct kfifo), GFP_ATOMIC);
        if (NULL == p_btif_info->p_tx_fifo) {
            i_ret = -ENOMEM;
            BTIF_ERR_FUNC("kzalloc for p_btif->p_tx_fifo failed\n");
            goto ret;
        }

        i_ret = kfifo_alloc(p_btif_info->p_tx_fifo, BTIF_HAL_TX_FIFO_SIZE, GFP_ATOMIC);
        if (0 != i_ret) {
            BTIF_ERR_FUNC("kfifo_alloc failed, errno(%d)\n", i_ret);
            kfree(p_btif_info->p_tx_fifo);
            p_btif_info->p_tx_fifo = NULL;
            i_ret = -ENOMEM;
            goto ret;
        }
        i_ret = 0;
    } else {
        BTIF_WARN_FUNC("p_btif_info->p_tx_fifo is already initialised, p_btif_info->p_tx_fifo(%p)\n",
                       p_btif_info->p_tx_fifo);
        i_ret = 0;
    }

ret:
    return i_ret;
}
static int gs_console_setup(struct console *co, char *options)
{
    struct gscons_info *info = &gscons_info;
    int status;

    info->port = NULL;
    info->console_req = NULL;
    info->req_busy = 0;
    spin_lock_init(&info->con_lock);

    status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
    if (status) {
        pr_err("%s: allocate console buffer failed\n", __func__);
        return status;
    }

    info->console_thread = kthread_create(gs_console_thread, co, "gs_console");
    if (IS_ERR(info->console_thread)) {
        pr_err("%s: cannot create console thread\n", __func__);
        kfifo_free(&info->con_buf);
        return PTR_ERR(info->console_thread);
    }
    wake_up_process(info->console_thread);

    return 0;
}
static int netconsole_init(void)
{
    struct nc_priv *priv;
    struct console_device *cdev;
    int ret;

    priv = xzalloc(sizeof(*priv));
    cdev = &priv->cdev;
    cdev->tstc = nc_tstc;
    cdev->putc = nc_putc;
    cdev->getc = nc_getc;
    cdev->devname = "netconsole";
    cdev->devid = DEVICE_ID_SINGLE;
    cdev->set_active = nc_set_active;

    g_priv = priv;

    priv->fifo = kfifo_alloc(1024);

    ret = console_register(cdev);
    if (ret) {
        pr_err("registering failed with %s\n", strerror(-ret));
        kfree(priv);
        return ret;
    }

    priv->port = 6666;

    dev_add_param_ip(&cdev->class_dev, "ip", NULL, NULL, &priv->ip, NULL);
    dev_add_param_int(&cdev->class_dev, "port", NULL, NULL, &priv->port, "%u", NULL);

    pr_info("registered as %s%d\n", cdev->class_dev.name, cdev->class_dev.id);

    return 0;
}
static int lbs_init_adapter(struct lbs_private *priv)
{
    int ret;

    lbs_deb_enter(LBS_DEB_MAIN);

    memset(priv->current_addr, 0xff, ETH_ALEN);

    priv->connect_status = LBS_DISCONNECTED;
    priv->channel = DEFAULT_AD_HOC_CHANNEL;
    priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
    priv->radio_on = 1;
    priv->psmode = LBS802_11POWERMODECAM;
    priv->psstate = PS_STATE_FULL_POWER;
    priv->is_deep_sleep = 0;
    priv->is_auto_deep_sleep_enabled = 0;
    priv->deep_sleep_required = 0;
    priv->wakeup_dev_required = 0;
    init_waitqueue_head(&priv->ds_awake_q);
    init_waitqueue_head(&priv->scan_q);
    priv->authtype_auto = 1;
    priv->is_host_sleep_configured = 0;
    priv->is_host_sleep_activated = 0;
    init_waitqueue_head(&priv->host_sleep_q);
    init_waitqueue_head(&priv->fw_waitq);
    mutex_init(&priv->lock);

    setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
                (unsigned long)priv);
    setup_timer(&priv->tx_lockup_timer, lbs_tx_lockup_handler,
                (unsigned long)priv);
    setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
                (unsigned long)priv);

    INIT_LIST_HEAD(&priv->cmdfreeq);
    INIT_LIST_HEAD(&priv->cmdpendingq);

    spin_lock_init(&priv->driver_lock);

    /* Allocate the command buffers */
    if (lbs_allocate_cmd_buffer(priv)) {
        pr_err("Out of memory allocating command buffers\n");
        ret = -ENOMEM;
        goto out;
    }
    priv->resp_idx = 0;
    priv->resp_len[0] = priv->resp_len[1] = 0;

    /* Create the event FIFO */
    ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
    if (ret) {
        pr_err("Out of memory allocating event FIFO buffer\n");
        goto out;
    }

out:
    lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);

    return ret;
}
static int __init example_init(void)
{
#ifdef DYNAMIC
    int ret;

    ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
    if (ret) {
        printk(KERN_ERR "error kfifo_alloc\n");
        return ret;
    }
#else
    INIT_KFIFO(test);
#endif
    if (testfunc() < 0) {
#ifdef DYNAMIC
        kfifo_free(&test);
#endif
        return -EIO;
    }

    if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) {
#ifdef DYNAMIC
        kfifo_free(&test);
#endif
        return -ENOMEM;
    }
    return 0;
}
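The DYNAMIC toggle above follows the kernel kfifo sample modules: with DYNAMIC defined the fifo storage comes from kfifo_alloc()/kfifo_free(), otherwise the fifo is declared statically and only needs INIT_KFIFO() at init time. A minimal sketch of the two matching declarations, assuming the sample's byte fifo; the name test and FIFO_SIZE mirror the snippet above, the rest is illustrative.

/* Sketch of the declarations the DYNAMIC toggle selects between. */
#include <linux/kfifo.h>

#define FIFO_SIZE 32

#ifdef DYNAMIC
/* Runtime allocation: the buffer is obtained by kfifo_alloc() at init. */
static struct kfifo test;
#else
/* Static allocation: the buffer lives inside the object itself, so no
 * kfifo_alloc() is needed, only INIT_KFIFO(test). */
static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
#endif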
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
                                                void (*work)(struct work_struct *),
                                                void (*tasklet)(unsigned long))
{
    struct omap_mbox_queue *mq;

    mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
    if (!mq)
        return NULL;

    spin_lock_init(&mq->lock);

    if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
        goto error;

    if (work)
        INIT_WORK(&mq->work, work);

    if (tasklet)
        tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
    return mq;
error:
    kfree(mq);
    return NULL;
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    int ret, size;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
               htt->max_num_pending_tx);

    spin_lock_init(&htt->tx_lock);
    idr_init(&htt->pending_tx);

    size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
    htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
                                          GFP_KERNEL);
    if (!htt->txbuf.vaddr) {
        ath10k_err(ar, "failed to alloc tx buffer\n");
        ret = -ENOMEM;
        goto free_idr_pending_tx;
    }

    ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
        goto free_txbuf;
    }

    ret = ath10k_htt_tx_alloc_txq(htt);
    if (ret) {
        ath10k_err(ar, "failed to alloc txq: %d\n", ret);
        goto free_frag_desc;
    }

    size = roundup_pow_of_two(htt->max_num_pending_tx);
    ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
    if (ret) {
        ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
        goto free_txq;
    }

    return 0;

free_txq:
    ath10k_htt_tx_free_txq(htt);

free_frag_desc:
    ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
    size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
    dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);

free_idr_pending_tx:
    idr_destroy(&htt->pending_tx);

    return ret;
}
int usart_fifo_init(struct TSerialDevice *dev)
{
    assert_param(dev);
    if (dev) {
        if (dev->mode & SERIAL_RX_INT_MODE) {
            dev->rx_fifo = kfifo_alloc(MAX_USART_BUF);
            if (dev->rx_fifo == NULL)
                return 0;
        }
        if (dev->mode & SERIAL_TX_INT_MODE) {
            dev->tx_fifo = kfifo_alloc(MAX_USART_BUF);
            if (dev->tx_fifo == NULL)
                return 0;
        }
        return 1;
    }
    return 0;
}
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
                                       int bytes_per_datum, int length)
{
    if ((length == 0) || (bytes_per_datum == 0))
        return -EINVAL;

    __iio_update_buffer(&buf->buffer, bytes_per_datum, length);
    return kfifo_alloc(&buf->kf, bytes_per_datum * length, GFP_KERNEL);
}
static struct threadrw_write_task *threadrw_buf_alloc_in(int num,
        int block_size,
        ssize_t (*write)(struct file *, struct stream_buf_s *,
                         const char __user *, size_t, int))
{
    int task_buffer_size = sizeof(struct threadrw_write_task) +
                           sizeof(struct threadrw_buf) * (num - 1) + 4;
    struct threadrw_write_task *task = vmalloc(task_buffer_size);
    int ret;

    if (!task)
        return NULL;
    memset(task, 0, task_buffer_size);
    spin_lock_init(&task->lock);
    INIT_DELAYED_WORK(&task->write_work, do_write_work);
    init_waitqueue_head(&task->wq);
    ret = kfifo_alloc(&task->datafifo, num, GFP_KERNEL);
    if (ret)
        goto err1;
    ret = kfifo_alloc(&task->freefifo, num, GFP_KERNEL);
    if (ret)
        goto err2;
    task->write = write;
    task->file = NULL;
    task->buffer_size = 0;
    ret = init_task_buffers(task, num, block_size);
    if (ret < 0)
        goto err3;
    threadrw_wq_get(); /* start thread. */
    return task;
err3:
    kfifo_free(&task->freefifo);
err2:
    kfifo_free(&task->datafifo);
err1:
    vfree(task);
    pr_err("alloc threadrw failed num:%d,block:%d\n", num, block_size);
    return NULL;
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// LaunchAudioCtrlThread
//     Create worker thread.
//----------------------------------------------------------------
int LaunchAudioCtrlThread(void)
{
    int ret;

    sgThreadData.m_lock = SPIN_LOCK_UNLOCKED;

    ret = kfifo_alloc(&sgThreadData.m_pkfifo, KFIFO_SIZE, GFP_KERNEL);
    if (ret)
        return ret;
    DEBUG("LaunchAudioCtrlThread KFIFO_SIZE= %d actual =%d\n",
          KFIFO_SIZE, sgThreadData.m_pkfifo.size);

    ret = kfifo_alloc(&sgThreadData.m_pkfifo_out, KFIFO_SIZE, GFP_KERNEL);
    if (ret) {
        kfifo_free(&sgThreadData.m_pkfifo);
        return ret;
    }
    DEBUG("LaunchAudioCtrlThread KFIFO_SIZE= %d actual =%d\n",
          KFIFO_SIZE, sgThreadData.m_pkfifo_out.size);

    INIT_WORK(&sgThreadData.mwork, AudioCtrlWorkThread);
    sgThreadData.pWorkqueue_AudioControl = create_workqueue("AudioCtrlWq");
    if (!sgThreadData.pWorkqueue_AudioControl)
        DEBUG("\n Error : Can not create work queue:AudioCtrlWq\n");

    // create a semaphore for blocking
    sgThreadData.action_complete = OSSEMAPHORE_Create(0, 0);

    return 0;
}
static void _transfer_frame_init(void)
{
    s_mipc_rx_buf = (u8 *)__get_free_pages(GFP_KERNEL, get_order(MAX_MIPC_RX_FRAME_SIZE));
    WARN_ON(NULL == s_mipc_rx_buf);

    if (kfifo_alloc(&s_mipc_rx_cache_kfifo, MAX_MIPC_RX_CACHE_SIZE, GFP_KERNEL)) {
        printk("_transfer_frame_init: kfifo rx cache no memory!\r\n");
        panic("%s[%d] kfifo rx cache no memory", __FILE__, __LINE__);
    }
    _TxFreeFrameList_Init(&s_mipc_tx_free_frame_list);
    _TransferInit(&s_mipc_tx_tansfer);
}
int dvb_hdhomerun_control_init()
{
    int ret = misc_register(&hdhomerun_control_device);

    DEBUG_FUNC(1);

    if (ret) {
        printk(KERN_ERR "Unable to register hdhomerun_control device\n");
        goto error;
    }

    /* Buffers for sending messages between kernel/userspace */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
    {
        struct kfifo *fifo;

        fifo = kfifo_alloc(control_bufsize, GFP_KERNEL, &control_spinlock_user);
        if (IS_ERR(fifo)) {
            ret = PTR_ERR(fifo);
            goto error;
        }
        control_fifo_user = *fifo;

        fifo = kfifo_alloc(control_bufsize, GFP_KERNEL, &control_spinlock_kernel);
        if (IS_ERR(fifo)) {
            ret = PTR_ERR(fifo);
            goto error;
        }
        control_fifo_kernel = *fifo;
    }
#else
    ret = kfifo_alloc(&control_fifo_user, control_bufsize, GFP_KERNEL);
    if (ret)
        goto error;

    ret = kfifo_alloc(&control_fifo_kernel, control_bufsize, GFP_KERNEL);
    if (ret)
        goto error;
#endif

    init_waitqueue_head(&control_readq);
    init_waitqueue_head(&inq);
    init_waitqueue_head(&outq);

error:
    return ret;
}
int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
{
    int i;
    int cmd_i;

    /*
     * initialize per-task: R2T pool and xmit queue
     */
    for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
        struct iscsi_task *task = session->cmds[cmd_i];
        struct iscsi_tcp_task *tcp_task = task->dd_data;

        /*
         * pre-allocated x2 as much r2ts to handle race when
         * target acks DataOut faster than we data_xmit() queues
         * could replenish r2tqueue.
         */

        /* R2T pool */
        if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 2, NULL,
                            sizeof(struct iscsi_r2t_info))) {
            goto r2t_alloc_fail;
        }

        /* R2T xmit queue */
        if (kfifo_alloc(&tcp_task->r2tqueue,
                        session->max_r2t * 4 * sizeof(void *), GFP_KERNEL)) {
            iscsi_pool_free(&tcp_task->r2tpool);
            goto r2t_alloc_fail;
        }
        spin_lock_init(&tcp_task->pool2queue);
        spin_lock_init(&tcp_task->queue2pool);
    }

    return 0;

r2t_alloc_fail:
    for (i = 0; i < cmd_i; i++) {
        struct iscsi_task *task = session->cmds[i];
        struct iscsi_tcp_task *tcp_task = task->dd_data;

        kfifo_free(&tcp_task->r2tqueue);
        iscsi_pool_free(&tcp_task->r2tpool);
    }
    return -ENOMEM;
}
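Several of the FIFOs in this collection (the r2tqueue above, smux_loop_pkt_fifo below) are sized in multiples of sizeof(void *) because they queue pointer values rather than payload bytes. A minimal sketch of that pattern, assuming the post-2.6.33 byte-oriented struct kfifo; my_fifo, my_push and my_pop are illustrative names, and, as in the snippets, any locking needed for concurrent producers or consumers is left to the caller.

/* Sketch: queueing pointer values through a byte kfifo. */
#include <linux/kfifo.h>

static struct kfifo my_fifo;

static int my_queue_init(void)
{
    /* room for 16 pointers; kfifo_alloc() rounds the size up to a power of two */
    return kfifo_alloc(&my_fifo, 16 * sizeof(void *), GFP_KERNEL);
}

static void my_push(void *item)
{
    /* copy the pointer value itself into the fifo */
    kfifo_in(&my_fifo, &item, sizeof(item));
}

static void *my_pop(void)
{
    void *item = NULL;

    if (kfifo_out(&my_fifo, &item, sizeof(item)) != sizeof(item))
        return NULL;    /* fifo empty (or short read) */
    return item;
}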
int smux_loopback_init(void)
{
    int ret = 0;

    spin_lock_init(&hw_fn_lock);
    smux_loopback_wq = create_singlethread_workqueue("smux_loopback_wq");
    if (!smux_loopback_wq) {    /* create_singlethread_workqueue() returns NULL on failure */
        pr_err("%s: failed to create workqueue\n", __func__);
        return -ENOMEM;
    }

    ret |= kfifo_alloc(&smux_loop_pkt_fifo,
                       SMUX_LOOP_FIFO_SIZE * sizeof(struct smux_pkt_t *),
                       GFP_KERNEL);

    return ret;
}
static int init_neo_proxy(struct _rpmsg_params *local, struct rpmsg_channel *rpmsg_chnl)
{
    int status = 0;

    /* Initialize mutex */
    mutex_init(&local->sync_lock);

    /* Initialize wait queue head that provides blocking rx for userspace */
    init_waitqueue_head(&local->usr_wait_q);

    /* Allocate kfifo for rpmsg */
    status = kfifo_alloc(&local->rpmsg_kfifo, RPMSG_KFIFO_SIZE, GFP_KERNEL);
    if (status) {
        pr_err("ERROR: %s %d Failed to run kfifo_alloc. rc=%d\n",
               __FUNCTION__, __LINE__, status);
        goto error0;
    }
    kfifo_reset(&local->rpmsg_kfifo);

    local->rpmsg_chnl = rpmsg_chnl;
    local->block_flag = 0;

    local->ept = rpmsg_create_ept(local->rpmsg_chnl, rpmsg_proxy_dev_ept_cb,
                                  local, local->endpt);
    if (!local->ept) {
        pr_err("ERROR: %s %d Failed to create endpoint.\n",
               __FUNCTION__, __LINE__);
        goto error1;
    }
    goto out;

    //TCM rpmsg_destroy_ept(local->ept);
error1:
    kfifo_free(&local->rpmsg_kfifo);
error0:
    pr_err("ERROR: %s %d\n", __FUNCTION__, __LINE__);
    return -ENODEV;
out:
    pr_info("%s %d\n", __FUNCTION__, __LINE__);
    return 0;
}
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
    int rc;
    struct ir_raw_handler *handler;

    if (!dev)
        return -EINVAL;

    dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
    if (!dev->raw)
        return -ENOMEM;

    dev->raw->dev = dev;
    rc_set_enabled_protocols(dev, ~0);
    rc = kfifo_alloc(&dev->raw->kfifo,
                     sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
                     GFP_KERNEL);
    if (rc < 0)
        goto out;

    spin_lock_init(&dev->raw->lock);
    dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
                                   "rc%ld", dev->devno);

    if (IS_ERR(dev->raw->thread)) {
        rc = PTR_ERR(dev->raw->thread);
        goto out;
    }

    mutex_lock(&ir_raw_handler_lock);
    list_add_tail(&dev->raw->list, &ir_raw_client_list);
    list_for_each_entry(handler, &ir_raw_handler_list, list)
        if (handler->raw_register)
            handler->raw_register(dev);
    mutex_unlock(&ir_raw_handler_lock);

    return 0;

out:
    kfree(dev->raw);
    dev->raw = NULL;
    return rc;
}