/*
 * VMCIHost_InitContext - prepare a host context for use.
 *
 * Only initializes the context's wait queue; on this platform the
 * event handle is not needed, so @eventHnd is ignored.
 */
void
VMCIHost_InitContext(VMCIHost *hostContext, // IN
                     uintptr_t eventHnd)    // IN: Unused
{
   init_waitqueue_head(&hostContext->waitQueue);
}
/*
 * ksb_init - module init for the ks_bridge driver.
 *
 * Allocates per-instance state (locks, lists, wait queues, a dedicated
 * workqueue and a debugfs file) for each bridge instance, then registers
 * the USB driver.  Any failure tears down every fully-initialized
 * instance before returning a negative errno.
 *
 * Fixes vs. original: error-string typo, pr_err (not pr_info) for the
 * name-allocation failure, and __ksb[i] is cleared after kfree() so no
 * dangling pointer is left behind on the mid-loop failure paths.
 */
static int __init ksb_init(void)
{
	struct ks_bridge *ksb;
	int num_instances = 0;
	int ret = 0;
	int i;

	/* NOTE(review): on older kernels debugfs_create_dir() returns NULL
	 * on failure, not an ERR_PTR; the IS_ERR pattern below follows the
	 * original code -- confirm against the target kernel version. */
	dbg_dir = debugfs_create_dir("ks_bridge", NULL);
	if (IS_ERR(dbg_dir))
		pr_err("unable to create debug dir");

	for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
		ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
		if (!ksb) {
			pr_err("unable to allocate mem for ks_bridge");
			ret = -ENOMEM;
			goto dev_free;
		}
		__ksb[i] = ksb;

		ksb->name = kasprintf(GFP_KERNEL, "ks_bridge:%i", i + 1);
		if (!ksb->name) {
			pr_err("unable to allocate name");
			kfree(ksb);
			__ksb[i] = NULL;	/* don't leave a dangling pointer */
			ret = -ENOMEM;
			goto dev_free;
		}

		spin_lock_init(&ksb->lock);
		INIT_LIST_HEAD(&ksb->to_mdm_list);
		INIT_LIST_HEAD(&ksb->to_ks_list);
		init_waitqueue_head(&ksb->ks_wait_q);
		init_waitqueue_head(&ksb->pending_urb_wait);

		ksb->wq = create_singlethread_workqueue(ksb->name);
		if (!ksb->wq) {
			pr_err("unable to allocate workqueue");
			kfree(ksb->name);
			kfree(ksb);
			__ksb[i] = NULL;	/* don't leave a dangling pointer */
			ret = -ENOMEM;
			goto dev_free;
		}

		INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
		INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
		init_usb_anchor(&ksb->submitted);

		ksb->dbg_idx = 0;
		ksb->dbg_lock = __RW_LOCK_UNLOCKED(lck);

		if (!IS_ERR(dbg_dir))
			debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
					ksb, &dbg_fops);

		num_instances++;
	}

	ret = usb_register(&ksb_usb_driver);
	if (ret) {
		pr_err("unable to register ks bridge driver");
		goto dev_free;
	}

	pr_info("init done");
	return 0;

dev_free:
	if (!IS_ERR(dbg_dir))
		debugfs_remove_recursive(dbg_dir);

	/* only fully-initialized instances (num_instances) are torn down */
	for (i = 0; i < num_instances; i++) {
		ksb = __ksb[i];
		destroy_workqueue(ksb->wq);
		kfree(ksb->name);
		kfree(ksb);
		__ksb[i] = NULL;
	}

	return ret;
}
/*
 * mmc_omap_probe - probe the OMAP MMC host controller.
 *
 * Claims the MMIO region, maps registers, acquires interface/functional
 * clocks and the IRQ, then creates one MMC slot per platform-data entry.
 * Resources are released in reverse order on any failure.
 *
 * Fixes vs. original: the ioremap() failure path fell through with
 * ret == 0 (probe wrongly reported success), and host->irq was assigned
 * twice.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}

	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL)
		return -EBUSY;

	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
	if (host == NULL) {
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);

	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->mem_res = res;
	host->irq = irq;		/* was assigned twice in the original */

	host->use_dma = 1;
	host->dev->dma_mask = &pdata->dma_mask;
	host->dma_ch = -1;

	host->phys_base = host->mem_res->start;
	host->virt_base = ioremap(res->start, resource_size(res));
	if (!host->virt_base) {
		/* BUG FIX: the original left ret == 0 here and returned
		 * "success" after an ioremap failure. */
		ret = -ENOMEM;
		goto err_ioremap;
	}

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		goto err_free_mmc_host;
	}
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_fclk;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			/* unwind the slots created so far */
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);
			goto err_plat_cleanup;
		}
	}

	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);

	return 0;

err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_fclk:
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
err_free_mmc_host:
	iounmap(host->virt_base);
err_ioremap:
	kfree(host);
err_free_mem_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
/*
 * initialize a newly allocated inode.
 *
 * Allocates a ceph_inode_info from the inode slab cache and zeroes /
 * initializes every field group (version, symlink, xattrs, caps,
 * snapshots, size tracking, refcounts, writeback work items).
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	/* version / flags bookkeeping */
	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	/* extended attributes: blobs, index tree and version counters */
	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	/* capability state: per-MDS caps, dirty/flushing tracking */
	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	/* per-open-mode reference counts */
	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	/* truncate / size reporting state */
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	/* pin / read / write / buffered-write reference counts */
	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	/* unsafe (uncommitted) writes and dir operations */
	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	/* snapshot realm membership */
	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	/* deferred work: writeback, page invalidation, truncation */
	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
/*
 * btlinux_port_init - module init for the BRCM rfcomm serial driver.
 *
 * Registers the char device, the uart driver and its ports, the SMD
 * platform driver and the diag misc device, then pre-allocates the
 * RX/TX request pools and the wake lock.
 *
 * Fixes vs. original: bail out (return) when btlinux_register_ports()
 * fails instead of continuing after tearing everything down; check the
 * misc_register() result; return -ENOMEM (not a bare -1) on request
 * allocation failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init btlinux_port_init(void)
{
	int ret;
	struct bt_diag_context *ctxt = &_context;
	struct bt_request *req;
	int i;

	info("Loading BRCM rfcomm driver %s", DRIVER_VERSION);

	/* Register BtLinuxPort as char device */
	btport_major = register_chrdev(BTLINUXPORT_MAJOR, BTPORT_DEV_NAME,
				       &port_fops);
	info("Registered btport chrdev, major number returned = %d",
	     btport_major);
	if (btport_major < 0) {
		err("btlinux_port_init: unable to get major");
		return btport_major;
	}

	ret = uart_register_driver(&btlinux_port_reg);
	if (ret) {
		err("uart_register_driver returns %d", ret);
		unregister_chrdev(btport_major, BTPORT_DEV_NAME);
		return ret;
	}

	ret = btlinux_register_ports(&btlinux_port_reg);
	if (ret) {
		err("btlinux_register_ports returns %d", ret);
		unregister_chrdev(btport_major, BTPORT_DEV_NAME);
		uart_unregister_driver(&btlinux_port_reg);
		/* BUG FIX: the original fell through here and kept
		 * initializing after full teardown. */
		return ret;
	}

	/* SMD platform driver is best-effort: log but continue (original
	 * behavior). */
	ret = platform_driver_register(&msm_smd_ch1_driver);
	if (ret < 0)
		printk(KERN_ERR "%s: Register driver fail\n", __func__);

#if 0 /*registed in usb diag*/
	ret = platform_device_register(&diag_plat_device);
	if (ret < 0) {
		printk(KERN_ERR "%s: Register device fail\n", __func__);
		platform_driver_unregister(&msm_smd_ch1_driver);
	}
#endif

	ctxt->is2ARM11 = 0;
	ctxt->is7E = 0x7E;
	init_waitqueue_head(&ctxt->read_wq);

	atomic_set(&ctxt->open_excl, 0);
	atomic_set(&ctxt->read_excl, 0);
	atomic_set(&ctxt->write_excl, 0);
	atomic_set(&ctxt->enable_excl, 0);

	spin_lock_init(&ctxt->lock);
	spin_lock_init(&ctxt->lock_reg_num);

	for (i = 0; i < TABLE_SIZE; i++)
		ctxt->id_table[i] = 0;

	INIT_LIST_HEAD(&ctxt->rx_idle);
	INIT_LIST_HEAD(&ctxt->tx_idle);
	INIT_LIST_HEAD(&ctxt->rx_done);
	INIT_LIST_HEAD(&ctxt->tx_done);

	/* BUG FIX: result was silently ignored; best-effort like the SMD
	 * driver above, but at least report the failure. */
	ret = misc_register(&diag_device_fops);
	if (ret)
		printk(KERN_ERR "%s: misc_register fail (%d)\n",
		       __func__, ret);

	info("Registered uart driver");

	/* pre-allocate the RX and TX request pools */
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = bt_alloc_req(8192);
		if (req == NULL)
			return -ENOMEM;	/* FIXME: no teardown of pools here */
		req->context = ctxt;
		bt_put_req(ctxt, &ctxt->rx_idle, req);
	}
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = bt_alloc_req(8192);
		if (req == NULL)
			return -ENOMEM;	/* FIXME: no teardown of pools here */
		req->context = ctxt;
		bt_put_req(ctxt, &ctxt->tx_idle, req);
	}

	wake_lock_init(&btport_wake_lock, WAKE_LOCK_SUSPEND, "BTPORT");

	return 0;
}
int s5p_g2d_probe(struct platform_device *pdev) { struct resource *res; int ret; #ifdef G2D_DEBUG printk("###########################s5p_g2d_probe called\n"); #endif /* find the IRQs */ s5p_g2d_irq_num = platform_get_irq(pdev, 0); if(s5p_g2d_irq_num <= 0) { printk(KERN_ERR "failed to get irq resouce\n"); return -ENOENT; } ret = request_irq(s5p_g2d_irq_num, s5p_g2d_irq, IRQF_DISABLED, pdev->name, NULL); if (ret) { printk("request_irq(g2d) failed.\n"); return ret; } /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if(res == NULL) { printk(KERN_ERR "failed to get memory region resouce\n"); return -ENOENT; } s5p_g2d_mem = request_mem_region(res->start, res->end-res->start+1, pdev->name); if(s5p_g2d_mem == NULL) { printk(KERN_ERR "failed to reserve memory region\n"); return -ENOENT; } s5p_g2d_base = ioremap(s5p_g2d_mem->start, s5p_g2d_mem->end - res->start + 1); if(s5p_g2d_base == NULL) { printk(KERN_ERR "failed ioremap\n"); return -ENOENT; } #if 1 s5p_g2d_clock = clk_get(&pdev->dev, "clk_g2d"); if (IS_ERR(s5p_g2d_clock)) { printk(KERN_ERR "failed to find g2d clock source\n"); return -ENOENT; } clk_enable(s5p_g2d_clock); #endif init_waitqueue_head(&waitq_g2d); ret = misc_register(&s5p_g2d_dev); if (ret) { printk (KERN_ERR "cannot register miscdev on minor=%d (%d)\n", G2D_MINOR, ret); return ret; } h_rot_mutex = (struct mutex *)kmalloc(sizeof(struct mutex), GFP_KERNEL); if (h_rot_mutex == NULL) return -1; mutex_init(h_rot_mutex); clk_disable(s5p_g2d_clock); //#ifdef G2D_DEBUG printk(KERN_ALERT"##################### s5p_g2d_probe Success\n"); //#endif return 0; }
/* FIXME: check every exception case (goto) */
/*
 * mfc_probe - probe the MFC (Multi Function Codec, FIMV v5.x) device.
 *
 * Allocates the global device structure, claims the register region and
 * IRQ, brings up power/clock management and the memory manager, kicks off
 * an asynchronous firmware load, then registers the misc device.  Errors
 * unwind through the goto chain in reverse acquisition order.
 */
static int __devinit mfc_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	mfcdev = kzalloc(sizeof(struct mfc_dev), GFP_KERNEL);
	if (unlikely(mfcdev == NULL)) {
		dev_err(&pdev->dev, "failed to allocate control memory\n");
		return -ENOMEM;
	}

	/* init. control structure */
	sprintf(mfcdev->name, "%s", MFC_DEV_NAME);
	mutex_init(&mfcdev->lock);
	init_waitqueue_head(&mfcdev->wait_sys);
	init_waitqueue_head(&mfcdev->wait_codec[0]);
	init_waitqueue_head(&mfcdev->wait_codec[1]);
	atomic_set(&mfcdev->inst_cnt, 0);
	atomic_set(&mfcdev->busfreq_lock_cnt, 0);
	mfcdev->device = &pdev->dev;
	platform_set_drvdata(pdev, mfcdev);

	/* get the memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "no memory resource specified\n");
		ret = -ENOENT;
		goto err_mem_res;
	}
	mfcdev->reg.rsrc_start = res->start;
	mfcdev->reg.rsrc_len = resource_size(res);

	/* request mem region for MFC register (0x0000 ~ 0xE008) */
	res = request_mem_region(mfcdev->reg.rsrc_start,
			mfcdev->reg.rsrc_len, pdev->name);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "failed to get memory region\n");
		ret = -ENOENT;
		goto err_mem_req;
	}

	/* ioremap for MFC register */
	mfcdev->reg.base = ioremap(mfcdev->reg.rsrc_start,
			mfcdev->reg.rsrc_len);
	if (unlikely(!mfcdev->reg.base)) {
		dev_err(&pdev->dev, "failed to ioremap memory region\n");
		ret = -EINVAL;
		goto err_mem_map;
	}
	init_reg(mfcdev->reg.base);

	mfcdev->irq = platform_get_irq(pdev, 0);
	if (unlikely(mfcdev->irq < 0)) {
		dev_err(&pdev->dev, "no irq resource specified\n");
		ret = -ENOENT;
		goto err_irq_res;
	}

	ret = request_irq(mfcdev->irq, mfc_irq, IRQF_DISABLED, mfcdev->name,
			mfcdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate irq (%d)\n", ret);
		goto err_irq_req;
	}

	/*
	 * initialize PM(power, clock) interface
	 */
	ret = mfc_init_pm(mfcdev);
	if (ret < 0) {
		printk(KERN_ERR "failed to init. MFC PM interface\n");
		goto err_pm_if;
	}

	/*
	 * initialize memory manager
	 */
	ret = mfc_init_mem_mgr(mfcdev);
	if (ret < 0) {
		printk(KERN_ERR "failed to init. MFC memory manager\n");
		goto err_mem_mgr;
	}

	/*
	 * loading firmware (asynchronous; completion handler finishes setup)
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
			MFC_FW_NAME, &pdev->dev, GFP_KERNEL, pdev,
			mfc_firmware_request_complete_handler);
	if (ret) {
		dev_err(&pdev->dev, "could not load firmware (err=%d)\n", ret);
		goto err_fw_req;
	}

#if defined(SYSMMU_MFC_ON) && defined(CONFIG_VIDEO_MFC_VCM_UMP)
	ret = vcm_activate(mfcdev->vcm_info.sysmmu_vcm);
	if (ret < 0) {
		mfc_err("failed to activate VCM: %d", ret);
		goto err_act_vcm;
	}
#endif

	/*
	 * initialize buffer manager
	 */
	mfc_init_buf();

	/* FIXME: final dec & enc */
	mfc_init_decoders();
	mfc_init_encoders();

	ret = misc_register(&mfc_miscdev);
	if (ret) {
		mfc_err("MFC can't misc register on minor=%d\n", MFC_MINOR);
		goto err_misc_reg;
	}

	mfc_info("MFC(Multi Function Codec - FIMV v5.x) registered successfully\n");

	return 0;

/* error unwinding: reverse order of acquisition */
err_misc_reg:
	mfc_final_buf();
#ifdef SYSMMU_MFC_ON
#ifdef CONFIG_VIDEO_MFC_VCM_UMP
	mfc_clock_on();
	vcm_deactivate(mfcdev->vcm_info.sysmmu_vcm);
	mfc_clock_off();
err_act_vcm:
#endif
	mfc_clock_on();
	sysmmu_off(SYSMMU_MFC_L);
	sysmmu_off(SYSMMU_MFC_R);
	mfc_clock_off();
#endif
	if (mfcdev->fw.info)
		release_firmware(mfcdev->fw.info);
err_fw_req:
	/* FIXME: make kernel dump when probe fail */
	mfc_clock_on();
	mfc_final_mem_mgr(mfcdev);
	mfc_clock_off();
err_mem_mgr:
	mfc_final_pm(mfcdev);
err_pm_if:
	free_irq(mfcdev->irq, mfcdev);
err_irq_req:
err_irq_res:
	iounmap(mfcdev->reg.base);
err_mem_map:
	release_mem_region(mfcdev->reg.rsrc_start, mfcdev->reg.rsrc_len);
err_mem_req:
err_mem_res:
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&mfcdev->lock);
	kfree(mfcdev);
	return ret;
}
static int r3964_open(struct tty_struct *tty) { struct r3964_info *pInfo; MOD_INC_USE_COUNT; TRACE_L("open"); TRACE_L("tty=%x, PID=%d, disc_data=%x", (int)tty, current->pid, (int)tty->disc_data); pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL); TRACE_M("r3964_open - info kmalloc %x",(int)pInfo); if(!pInfo) { printk(KERN_ERR "r3964: failed to alloc info structure\n"); return -ENOMEM; } pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL); TRACE_M("r3964_open - rx_buf kmalloc %x",(int)pInfo->rx_buf); if(!pInfo->rx_buf) { printk(KERN_ERR "r3964: failed to alloc receive buffer\n"); kfree(pInfo); TRACE_M("r3964_open - info kfree %x",(int)pInfo); return -ENOMEM; } pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL); TRACE_M("r3964_open - tx_buf kmalloc %x",(int)pInfo->tx_buf); if(!pInfo->tx_buf) { printk(KERN_ERR "r3964: failed to alloc transmit buffer\n"); kfree(pInfo->rx_buf); TRACE_M("r3964_open - rx_buf kfree %x",(int)pInfo->rx_buf); kfree(pInfo); TRACE_M("r3964_open - info kfree %x",(int)pInfo); return -ENOMEM; } pInfo->tty = tty; init_waitqueue_head (&pInfo->read_wait); pInfo->priority = R3964_MASTER; pInfo->rx_first = pInfo->rx_last = NULL; pInfo->tx_first = pInfo->tx_last = NULL; pInfo->rx_position = 0; pInfo->tx_position = 0; pInfo->last_rx = 0; pInfo->blocks_in_rx_queue = 0; pInfo->firstClient=NULL; pInfo->state=R3964_IDLE; pInfo->flags = R3964_DEBUG; pInfo->count_down = 0; pInfo->nRetry = 0; tty->disc_data = pInfo; /* * Add 'on_timer' to timer task queue * (will be called from timer bh) */ INIT_LIST_HEAD(&pInfo->bh_1.list); pInfo->bh_1.sync = 0; pInfo->bh_1.routine = &on_timer_1; pInfo->bh_1.data = pInfo; INIT_LIST_HEAD(&pInfo->bh_2.list); pInfo->bh_2.sync = 0; pInfo->bh_2.routine = &on_timer_2; pInfo->bh_2.data = pInfo; queue_task(&pInfo->bh_1, &tq_timer); return 0; }
/*
 * acm_probe - probe a USB CDC-ACM (modem-like) device.
 *
 * Parses the class-specific descriptors to locate the control and data
 * interfaces (handling several classes of broken hardware via quirks),
 * validates/locates the three endpoints, then allocates the acm state,
 * URBs and buffers and registers the tty device.
 *
 * Returns 0 on success or a negative errno; all allocations are unwound
 * through the alloc_fail* chain at the bottom.
 */
static int acm_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_cdc_union_desc *union_header = NULL;
	struct usb_cdc_country_functional_desc *cfd = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_endpoint_descriptor *epctrl = NULL;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct acm *acm;
	int minor;
	int ctrlsize, readsize;
	u8 *buf;
	u8 ac_management_function = 0;
	u8 call_management_function = 0;
	int call_interface_num = -1;
	int data_interface_num;
	unsigned long quirks;
	int num_rx_buf;
	int i;
	int combined_interfaces = 0;

	/* normal quirks */
	quirks = (unsigned long)id->driver_info;
	num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;

	/* handle quirks deadly to normal probing*/
	if (quirks == NO_UNION_NORMAL) {
		/* quirky device: fixed interface numbering, skip parsing */
		data_interface = usb_ifnum_to_if(usb_dev, 1);
		control_interface = usb_ifnum_to_if(usb_dev, 0);
		goto skip_normal_probe;
	}

	/* normal probing*/
	if (!buffer) {
		dev_err(&intf->dev, "Weird descriptor references\n");
		return -EINVAL;
	}

	if (!buflen) {
		/* some devices attach the extra descriptors to the endpoint
		 * instead of the interface */
		if (intf->cur_altsetting->endpoint->extralen &&
				intf->cur_altsetting->endpoint->extra) {
			dev_dbg(&intf->dev,
				"Seeking extra descriptors on endpoint\n");
			buflen = intf->cur_altsetting->endpoint->extralen;
			buffer = intf->cur_altsetting->endpoint->extra;
		} else {
			dev_err(&intf->dev,
				"Zero length descriptor references\n");
			return -EINVAL;
		}
	}

	/* walk the class-specific descriptors; buffer[0] is bLength,
	 * buffer[1] bDescriptorType, buffer[2] bDescriptorSubtype */
	while (buflen > 0) {
		if (buffer[1] != USB_DT_CS_INTERFACE) {
			dev_err(&intf->dev, "skipping garbage\n");
			goto next_desc;
		}

		switch (buffer[2]) {
		case USB_CDC_UNION_TYPE: /* we've found it */
			if (union_header) {
				dev_err(&intf->dev, "More than one "
					"union descriptor, skipping ...\n");
				goto next_desc;
			}
			union_header = (struct usb_cdc_union_desc *)buffer;
			break;
		case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
			cfd = (struct usb_cdc_country_functional_desc *)buffer;
			break;
		case USB_CDC_HEADER_TYPE: /* maybe check version */
			break; /* for now we ignore it */
		case USB_CDC_ACM_TYPE:
			ac_management_function = buffer[3];
			break;
		case USB_CDC_CALL_MANAGEMENT_TYPE:
			call_management_function = buffer[3];
			call_interface_num = buffer[4];
			if ((call_management_function & 3) != 3)
				dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
			break;
		default:
			/* there are LOTS more CDC descriptors that
			 * could legitimately be found here.
			 */
			dev_dbg(&intf->dev, "Ignoring descriptor: "
					"type %02x, length %d\n",
					buffer[2], buffer[0]);
			break;
		}
next_desc:
		buflen -= buffer[0];
		buffer += buffer[0];
	}

	if (!union_header) {
		if (call_interface_num > 0) {
			/* fall back to the call-management descriptor */
			dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
			data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
			control_interface = intf;
		} else {
			if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
				dev_dbg(&intf->dev,"No union descriptor, giving up\n");
				return -ENODEV;
			} else {
				/* single interface carrying all 3 endpoints */
				dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
				combined_interfaces = 1;
				control_interface = data_interface = intf;
				goto look_for_collapsed_interface;
			}
		}
	} else {
		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
		data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
		if (!control_interface || !data_interface) {
			dev_dbg(&intf->dev, "no interfaces\n");
			return -ENODEV;
		}
	}

	if (data_interface_num != call_interface_num)
		dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");

	if (control_interface == data_interface) {
		/* some broken devices designed for windows work this way */
		dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
		combined_interfaces = 1;
		/* a popular other OS doesn't use it */
		quirks |= NO_CAP_LINE;
		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
			return -EINVAL;
		}
look_for_collapsed_interface:
		/* classify the 3 endpoints of the combined interface */
		for (i = 0; i < 3; i++) {
			struct usb_endpoint_descriptor *ep;
			ep = &data_interface->cur_altsetting->endpoint[i].desc;

			if (usb_endpoint_is_int_in(ep))
				epctrl = ep;
			else if (usb_endpoint_is_bulk_out(ep))
				epwrite = ep;
			else if (usb_endpoint_is_bulk_in(ep))
				epread = ep;
			else
				return -EINVAL;
		}
		if (!epctrl || !epread || !epwrite)
			return -ENODEV;
		else
			goto made_compressed_probe;
	}

skip_normal_probe:

	/*workaround for switched interfaces */
	if (data_interface->cur_altsetting->desc.bInterfaceClass
						!= CDC_DATA_INTERFACE_TYPE) {
		if (control_interface->cur_altsetting->desc.bInterfaceClass
						== CDC_DATA_INTERFACE_TYPE) {
			struct usb_interface *t;
			dev_dbg(&intf->dev,
				"Your device has switched interfaces.\n");
			t = control_interface;
			control_interface = data_interface;
			data_interface = t;
		} else {
			return -EINVAL;
		}
	}

	/* Accept probe requests only for the control interface */
	if (!combined_interfaces && intf != control_interface)
		return -ENODEV;

	if (!combined_interfaces && usb_interface_claimed(data_interface)) {
		/* valid in this context */
		dev_dbg(&intf->dev, "The data interface isn't available\n");
		return -EBUSY;
	}

	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
		return -EINVAL;

	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
	epread = &data_interface->cur_altsetting->endpoint[0].desc;
	epwrite = &data_interface->cur_altsetting->endpoint[1].desc;

	/* workaround for switched endpoints */
	if (!usb_endpoint_dir_in(epread)) {
		/* descriptors are swapped */
		struct usb_endpoint_descriptor *t;
		dev_dbg(&intf->dev,
			"The data interface has switched endpoints\n");
		t = epread;
		epread = epwrite;
		epwrite = t;
	}
made_compressed_probe:
	dbg("interfaces are valid");

	/* find a free minor number */
	for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);

	if (minor == ACM_TTY_MINORS) {
		dev_err(&intf->dev, "no more free acm devices\n");
		return -ENODEV;
	}

	acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
	if (acm == NULL) {
		dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
		goto alloc_fail;
	}

	ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
	readsize = le16_to_cpu(epread->wMaxPacketSize) *
				(quirks == SINGLE_RX_URB ? 1 : 2);
	acm->combined_interfaces = combined_interfaces;
	acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
	acm->control = control_interface;
	acm->data = data_interface;
	acm->minor = minor;
	acm->dev = usb_dev;
	acm->ctrl_caps = ac_management_function;
	if (quirks & NO_CAP_LINE)
		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
	acm->ctrlsize = ctrlsize;
	acm->readsize = readsize;
	acm->rx_buflimit = num_rx_buf;
	acm->urb_task.func = acm_rx_tasklet;
	acm->urb_task.data = (unsigned long) acm;
	INIT_WORK(&acm->work, acm_softint);
	INIT_WORK(&acm->waker, acm_waker);
	init_waitqueue_head(&acm->drain_wait);
	spin_lock_init(&acm->throttle_lock);
	spin_lock_init(&acm->write_lock);
	spin_lock_init(&acm->read_lock);
	mutex_init(&acm->mutex);
	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
	acm->is_int_ep = usb_endpoint_xfer_int(epread);
	if (acm->is_int_ep)
		acm->bInterval = epread->bInterval;
	tty_port_init(&acm->port);
	acm->port.ops = &acm_port_ops;

	buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
	if (!buf) {
		dev_dbg(&intf->dev, "out of memory (ctrl buffer alloc)\n");
		goto alloc_fail2;
	}
	acm->ctrl_buffer = buf;

	if (acm_write_buffers_alloc(acm) < 0) {
		dev_dbg(&intf->dev, "out of memory (write buffer alloc)\n");
		goto alloc_fail4;
	}

	acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!acm->ctrlurb) {
		dev_dbg(&intf->dev, "out of memory (ctrlurb kmalloc)\n");
		goto alloc_fail5;
	}

	/* read URBs */
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_ru *rcv = &(acm->ru[i]);

		rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (rcv->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (read urbs usb_alloc_urb)\n");
			goto alloc_fail7;
		}

		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		rcv->instance = acm;
	}
	/* read buffers */
	for (i = 0; i < num_rx_buf; i++) {
		struct acm_rb *rb = &(acm->rb[i]);

		rb->base = usb_buffer_alloc(acm->dev, readsize,
				GFP_KERNEL, &rb->dma);
		if (!rb->base) {
			dev_dbg(&intf->dev,
				"out of memory (read bufs usb_buffer_alloc)\n");
			goto alloc_fail7;
		}
	}
	/* write URBs, interrupt or bulk depending on the endpoint type */
	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *snd = &(acm->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (snd->urb == NULL) {
			dev_dbg(&intf->dev,
				"out of memory (write urbs usb_alloc_urb)");
			goto alloc_fail7;
		}

		if (usb_endpoint_xfer_int(epwrite))
			usb_fill_int_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
		else
			usb_fill_bulk_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		snd->instance = acm;
	}

	usb_set_intfdata(intf, acm);

	i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
	if (i < 0)
		goto alloc_fail8;

	if (cfd) { /* export the country data */
		acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
		if (!acm->country_codes)
			goto skip_countries;
		acm->country_code_size = cfd->bLength - 4;
		memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
							cfd->bLength - 4);
		acm->country_rel_date = cfd->iCountryCodeRelDate;

		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
		if (i < 0) {
			kfree(acm->country_codes);
			goto skip_countries;
		}

		i = device_create_file(&intf->dev,
						&dev_attr_iCountryCodeRelDate);
		if (i < 0) {
			kfree(acm->country_codes);
			goto skip_countries;
		}
	}

skip_countries:
	usb_fill_int_urb(acm->ctrlurb, usb_dev,
			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
			 /* works around buggy devices */
			 epctrl->bInterval ? epctrl->bInterval : 0xff);
	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	acm->ctrlurb->transfer_dma = acm->ctrl_dma;

	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);

	acm_set_control(acm, acm->ctrlout);

	/* default line coding: 9600 8N1 */
	acm->line.dwDTERate = cpu_to_le32(9600);
	acm->line.bDataBits = 8;
	acm_set_line(acm, &acm->line);

	usb_driver_claim_interface(&acm_driver, data_interface, acm);
	usb_set_intfdata(data_interface, acm);

	usb_get_intf(control_interface);
	tty_register_device(acm_tty_driver, minor, &control_interface->dev);

	acm_table[minor] = acm;

	return 0;
/* unwind allocations in reverse order */
alloc_fail8:
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
alloc_fail7:
	acm_read_buffers_free(acm);
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(acm->ru[i].urb);
	usb_free_urb(acm->ctrlurb);
alloc_fail5:
	acm_write_buffers_free(acm);
alloc_fail4:
	usb_buffer_free(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
	kfree(acm);
alloc_fail:
	return -ENOMEM;
}
/*
 * mtd_ioctl - character-device ioctl handler for MTD devices.
 *
 * Dispatches the MEM* ioctls: region/erase info queries, synchronous
 * erase, OOB read/write, lock/unlock and bad-block handling.
 *
 * Fix vs. original: the MEMWRITEOOB access_ok() failure returned a
 * positive EFAULT (missing minus sign); it now returns -EFAULT, matching
 * the MEMREADOOB path.
 *
 * Returns 0 on success or a negative errno.
 */
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = file->private_data;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	/* validate the user pointer against the size encoded in the cmd */
	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		/* NOTE(review): f_mode bit 2 is the historical FMODE_WRITE
		 * check -- confirm against the kernel headers in use */
		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
					sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			 * FIXME: Allow INTERRUPTIBLE. Which means
			 * not having the wait_queue head on the stack.
			 *
			 * If the wq_head is on the stack, and we
			 * leave because we got interrupted, then the
			 * wq_head is no longer there when the
			 * callback routine tries to wake us up.
			 */
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)
						? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			/* BUG FIX: original returned positive EFAULT here */
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length,
				       &retlen, databuf);

		if (copy_to_user(argp + sizeof(uint32_t), &retlen,
				sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length,
				      &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp,
				sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo),
				sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;

		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;

		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
/*
 * jpeg_probe - platform probe for the MTK JPEG codec driver.
 *
 * Registers a char device (plus class/device node), initialises the
 * decoder/encoder spinlocks, wait queues and status globals, then requests
 * the encoder (and, with JPEG_DEC_DRIVER, decoder) interrupts.
 * Always returns 0, even if individual steps fail (see notes below).
 */
static int jpeg_probe(struct platform_device *pdev)
{
	struct class_device;	/* local forward declaration only */
	int ret;
	struct class_device *class_dev = NULL;

	JPEG_MSG("-------------jpeg driver probe-------\n");
	ret = alloc_chrdev_region(&jpeg_devno, 0, 1, JPEG_DEVNAME);
	if(ret)
	{
		JPEG_ERR("Error: Can't Get Major number for JPEG Device\n");
	}
	else
	{
		JPEG_MSG("Get JPEG Device Major number (%d)\n", jpeg_devno);
	}

	/* NOTE(review): the results of cdev_alloc()/cdev_add()/class_create()/
	 * device_create() are not checked; probe continues (and returns 0)
	 * even if any of these fail. */
	jpeg_cdev = cdev_alloc();
	jpeg_cdev->owner = THIS_MODULE;
	jpeg_cdev->ops = &jpeg_fops;
	ret = cdev_add(jpeg_cdev, jpeg_devno, 1);

	jpeg_class = class_create(THIS_MODULE, JPEG_DEVNAME);
	class_dev = (struct class_device *)device_create(jpeg_class, NULL, jpeg_devno, NULL, JPEG_DEVNAME);

	spin_lock_init(&jpeg_dec_lock);
	spin_lock_init(&jpeg_enc_lock);

	// initial codec, register codec ISR
	dec_status = 0;
	enc_status = 0;
	_jpeg_dec_int_status = 0;
	_jpeg_enc_int_status = 0;
	_jpeg_dec_mode = 0;

#ifndef FPGA_VERSION
#ifdef JPEG_DEC_DRIVER
	init_waitqueue_head(&dec_wait_queue);
#endif
	init_waitqueue_head(&enc_wait_queue);

	//mt6575_irq_set_sens(MT6575_JPEG_CODEC_IRQ_ID, MT65xx_LEVEL_SENSITIVE);
	//mt6575_irq_set_polarity(MT6575_JPEG_CODEC_IRQ_ID, MT65xx_POLARITY_LOW);
	//mt6575_irq_unmask(MT6575_JPEG_CODEC_IRQ_ID);

	JPEG_MSG("request JPEG Encoder IRQ \n");
	/* NOTE(review): enable_irq() is called before request_irq(); normally
	 * request_irq() itself enables the line. Confirm this ordering is
	 * intentional for this platform. */
	enable_irq(MT6582_JPEG_ENC_IRQ_ID);
	if(request_irq(MT6582_JPEG_ENC_IRQ_ID, jpeg_drv_enc_isr, IRQF_TRIGGER_LOW, "jpeg_enc_driver" , NULL))
	//if(request_irq(MT6582_JPEG_ENC_IRQ_ID, jpeg_drv_enc_isr, /*IRQF_TRIGGER_RISING*/ IRQF_TRIGGER_HIGH, "jpeg_enc_driver" , NULL))
	//if(request_irq(MT6582_JPEG_ENC_IRQ_ID, jpeg_drv_enc_isr, IRQF_TRIGGER_RISING , "jpeg_enc_driver" , NULL))
	{
		JPEG_ERR("JPEG ENC Driver request irq failed\n");
	}

#ifdef JPEG_DEC_DRIVER
	enable_irq(MT6589_JPEG_DEC_IRQ_ID);
	JPEG_MSG("request JPEG Decoder IRQ \n");
	//if(request_irq(MT6589_JPEG_DEC_IRQ_ID, jpeg_drv_dec_isr, IRQF_TRIGGER_LOW, "jpeg_dec_driver" , NULL))
	//if(request_irq(MT6589_JPEG_DEC_IRQ_ID, jpeg_drv_dec_isr, /*IRQF_TRIGGER_RISING*/ IRQF_TRIGGER_HIGH, "jpeg_dec_driver" , NULL))
	//if(request_irq(MT6589_JPEG_DEC_IRQ_ID, jpeg_drv_dec_isr, IRQF_TRIGGER_RISING , "jpeg_dec_driver" , NULL))
	if(request_irq(MT6589_JPEG_DEC_IRQ_ID, jpeg_drv_dec_isr, IRQF_TRIGGER_FALLING , "jpeg_dec_driver" , NULL))
	{
		JPEG_ERR("JPEG DEC Driver request irq failed\n");
	}
#endif
#endif

	JPEG_MSG("JPEG Probe Done\n");
	NOT_REFERENCED(class_dev);
	return 0;
}
/*
 * pcie_init - allocate and initialise the hotplug controller state for a
 * PCIe port service device.
 *
 * Reads SLTCAP/LNKCAP to record slot capabilities and Data Link Layer
 * Link Active Reporting support, clears stale Slot Status events,
 * disables notifications, and creates the slot.  Returns the new
 * controller, or NULL on any failure (the partially built controller is
 * freed).
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, link_cap;
	struct pci_dev *pdev = dev->port;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		dev_err(&dev->device, "%s: Out of memory\n", __func__);
		goto abort;
	}
	ctrl->pcie = dev;
	if (!pci_pcie_cap(pdev)) {
		ctrl_err(ctrl, "Cannot find PCI Express capability\n");
		goto abort_ctrl;
	}
	if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
		ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
		goto abort_ctrl;
	}
	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	init_waitqueue_head(&ctrl->queue);
	dbg_ctrl(ctrl);
	/*
	 * Controller doesn't notify of command completion if the "No
	 * Command Completed Support" bit is set in Slot Capability
	 * register or the controller supports none of power
	 * controller, attention led, power led and EMI.
	 */
	if (NO_CMD_CMPL(ctrl) ||
	    !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
		ctrl->no_cmd_complete = 1;

	/* Check if Data Link Layer Link Active Reporting is implemented */
	if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
		goto abort_ctrl;
	}
	if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
		ctrl_dbg(ctrl, "Link Active Reporting supported\n");
		ctrl->link_active_reporting = 1;
	}

	/* Clear all remaining event bits in Slot Status register */
	if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
		goto abort_ctrl;

	/* Disable software notification */
	pcie_disable_notification(ctrl);

	ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);
	if (pcie_init_slot(ctrl))
		goto abort_ctrl;

	return ctrl;

abort_ctrl:
	kfree(ctrl);
abort:
	return NULL;
}
/*
 * dma_copy_init - module init: allocate the write-combined DMA source and
 * destination buffers, register the char device plus class/device node,
 * map the DMA3 controller registers and install the shared DMA3 IRQ
 * handler.
 *
 * Returns 0 on success or a negative errno; the goto-unwind chain releases
 * every resource acquired before the failure point.
 */
static int __init dma_copy_init(void)
{
	int ret;
	dev_t devt;
	struct device *dev;

	printk("dma_copy init\n");

	/* Coherent (write-combined) buffers the DMA engine copies between. */
	dma_buf.src = dma_alloc_writecombine(NULL, BUF_SZ, &dma_buf.src_phys, GFP_KERNEL);
	if (!dma_buf.src) {
		CL_DBG_PRINT("src-dma alloc failed\n");
		ret = -ENOMEM;
		goto err_alloc_dma_src;
	}
	dma_buf.dst = dma_alloc_writecombine(NULL, BUF_SZ, &dma_buf.dst_phys, GFP_KERNEL);
	if (!dma_buf.dst) {
		CL_DBG_PRINT("dst-dma alloc failed\n");
		ret = -ENOMEM;
		goto err_alloc_dma_dst;
	}

	ret = alloc_chrdev_region(&devt, 0, DEV_NUM, device_name);
	if (ret) {
		CL_DBG_PRINT("dma_copy: alloc_chardev_region failed!\n");
		goto err_alloc_chrdev_region;
	}
	major = MAJOR(devt);

	cdev_init(&dma_copy_dev.cdev, &dma_copy_fops);
	dma_copy_dev.cdev.ops = &dma_copy_fops;
	dma_copy_dev.cdev.owner = THIS_MODULE;
	ret = cdev_add(&dma_copy_dev.cdev, devt, DEV_NUM);
	if (ret)
		goto err_cdev_add;

	dma_copy_class = class_create(THIS_MODULE, class_name);
	if (IS_ERR(dma_copy_class)) {
		CL_DBG_PRINT(" failed in creating class.\n");
		ret = PTR_ERR(dma_copy_class);
		goto err_class_create;
	}

	dev = device_create(dma_copy_class, NULL, devt, NULL, device_name);
	if (IS_ERR(dev)) {
		CL_DBG_PRINT("device_create failed\n");
		/* BUGFIX: 'ret' was previously left at 0 here, so a failed
		 * device_create() made module init report success while the
		 * error unwind ran. */
		ret = PTR_ERR(dev);
		goto err_device_create;
	}

	dma_copy_dev.hw.dma_regs = ioremap(DMA3_BASE_ADDR, sizeof(struct dma_regs));
	if (!dma_copy_dev.hw.dma_regs) {
		CL_DBG_PRINT(" dma ioremap failed.\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	init_waitqueue_head(&dma_copy_dev.dma_waitq);

	ret = request_irq(INT_DMA3, dma_copy_irq, IRQF_SHARED, "dma3", &dma_copy_dev);
	if (ret) {
		CL_DBG_PRINT("dma3 busy\n");
		goto err_req_irq;
	}
	return ret;

err_req_irq:
	iounmap(dma_copy_dev.hw.dma_regs);
err_ioremap:
	device_destroy(dma_copy_class, MKDEV(major, 0));
err_device_create:
	class_destroy(dma_copy_class);
err_class_create:
	cdev_del(&dma_copy_dev.cdev);
err_cdev_add:
	unregister_chrdev_region(MKDEV(major, 0), DEV_NUM);
err_alloc_chrdev_region:
	dma_free_writecombine(NULL, BUF_SZ, dma_buf.dst, dma_buf.dst_phys);
err_alloc_dma_dst:
	dma_free_writecombine(NULL, BUF_SZ, dma_buf.src, dma_buf.src_phys);
err_alloc_dma_src:
	return ret;
}
/*
 * VMCI_CreateEvent --
 *
 *      Initialise the given VMCIEvent (a Linux wait-queue head) so it can
 *      subsequently be waited on and signalled.  Performs no allocation.
 */
void
VMCI_CreateEvent(VMCIEvent *event)  // IN:
{
	init_waitqueue_head(event);
}
int line6_init_midi(struct usb_line6 *line6) { static struct snd_device_ops midi_ops = { .dev_free = snd_line6_midi_free, }; int err; struct snd_line6_midi *line6midi; if (!(line6->properties->capabilities & LINE6_BIT_CONTROL)) { return 0; } line6midi = kzalloc(sizeof(struct snd_line6_midi), GFP_KERNEL); if (line6midi == NULL) return -ENOMEM; err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0); if (err < 0) { kfree(line6midi); return err; } err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1); if (err < 0) { kfree(line6midi->midibuf_in.buf); kfree(line6midi); return err; } line6midi->line6 = line6; switch(line6->product) { case LINE6_DEVID_PODHD300: case LINE6_DEVID_PODHD500: line6midi->midi_mask_transmit = 1; line6midi->midi_mask_receive = 1; break; default: line6midi->midi_mask_transmit = 1; line6midi->midi_mask_receive = 4; } line6->line6midi = line6midi; err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi, &midi_ops); if (err < 0) return err; snd_card_set_dev(line6->card, line6->ifcdev); err = snd_line6_new_midi(line6midi); if (err < 0) return err; err = device_create_file(line6->ifcdev, &dev_attr_midi_mask_transmit); if (err < 0) return err; err = device_create_file(line6->ifcdev, &dev_attr_midi_mask_receive); if (err < 0) return err; init_waitqueue_head(&line6midi->send_wait); spin_lock_init(&line6midi->send_urb_lock); spin_lock_init(&line6midi->midi_transmit_lock); return 0; }
static int sensors_adsp_probe(struct platform_device *pdev) { int ret = 0; sns_ctl.dev_class = class_create(THIS_MODULE, DRV_NAME); if (sns_ctl.dev_class == NULL) { pr_err("%s: class_create fail.\n", __func__); goto res_err; } ret = alloc_chrdev_region(&sns_ctl.dev_num, 0, 1, DRV_NAME); if (ret) { pr_err("%s: alloc_chrdev_region fail.\n", __func__); goto alloc_chrdev_region_err; } sns_ctl.dev = device_create(sns_ctl.dev_class, NULL, sns_ctl.dev_num, &sns_ctl, DRV_NAME); if (IS_ERR(sns_ctl.dev)) { pr_err("%s: device_create fail.\n", __func__); goto device_create_err; } sns_ctl.cdev = cdev_alloc(); if (sns_ctl.cdev == NULL) { pr_err("%s: cdev_alloc fail.\n", __func__); goto cdev_alloc_err; } cdev_init(sns_ctl.cdev, &sensors_adsp_fops); sns_ctl.cdev->owner = THIS_MODULE; ret = cdev_add(sns_ctl.cdev, sns_ctl.dev_num, 1); if (ret) { pr_err("%s: cdev_add fail.\n", __func__); goto cdev_add_err; } sns_ctl.sns_workqueue = alloc_workqueue("sns_ocmem", WQ_NON_REENTRANT, 0); if (!sns_ctl.sns_workqueue) { pr_err("%s: Failed to create work queue\n", __func__); goto cdev_add_err; } sns_ctl.smd_wq = alloc_workqueue("smd_wq", WQ_NON_REENTRANT, 0); if (!sns_ctl.smd_wq) { pr_err("%s: Failed to create work queue\n", __func__); goto cdev_add_err; } init_waitqueue_head(&sns_ctl.sns_wait); spin_lock_init(&sns_ctl.sns_lock); sns_ctl.ocmem_handle = NULL; sns_ctl.buf = NULL; sns_ctl.sns_ocmem_status = 0; sns_ctl.ocmem_enabled = true; sns_ctl.ocmem_nb.notifier_call = sns_ocmem_drv_cb; sns_ctl.smd_ch = NULL; sns_ctl.pdev = pdev; INIT_WORK(&sns_ctl.sns_work, sns_ocmem_main); INIT_WORK(&sns_ctl.smd_read_work, sns_ocmem_smd_read); queue_work(sns_ctl.sns_workqueue, &sns_ctl.sns_work); return 0; cdev_add_err: kfree(sns_ctl.cdev); cdev_alloc_err: device_destroy(sns_ctl.dev_class, sns_ctl.dev_num); device_create_err: unregister_chrdev_region(sns_ctl.dev_num, 1); alloc_chrdev_region_err: class_destroy(sns_ctl.dev_class); res_err: return -ENODEV; }
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)	/* OUT: new dquot, or NULL on error */
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;	/* grows as the trans makes progress */

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	/* Allocation requires a transaction with block reservation. */
	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
static int xiic_i2c_probe(struct platform_device *pdev) { struct xiic_i2c *i2c; struct xiic_i2c_platform_data *pdata; struct resource *res; int ret, irq; u8 i; u32 sr; i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); i2c->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; pdata = dev_get_platdata(&pdev->dev); /* hook up driver to tree */ platform_set_drvdata(pdev, i2c); i2c->adap = xiic_adapter; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; mutex_init(&i2c->lock); init_waitqueue_head(&i2c->wait); ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr, xiic_process, IRQF_ONESHOT, pdev->name, i2c); if (ret < 0) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); return ret; } /* * Detect endianness * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not * set, assume that the endianness was wrong and swap. */ i2c->endianness = LITTLE; xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK); /* Reset is cleared in xiic_reinit */ sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET); if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK)) i2c->endianness = BIG; xiic_reinit(i2c); /* add i2c adapter to i2c tree */ ret = i2c_add_adapter(&i2c->adap); if (ret) { dev_err(&pdev->dev, "Failed to add adapter\n"); xiic_deinit(i2c); return ret; } if (pdata) { /* add in known devices to the bus */ for (i = 0; i < pdata->num_devices; i++) i2c_new_device(&i2c->adap, pdata->devices + i); } return 0; }
static int ubifs_fill_super(struct super_block *sb, void *data, int silent) { struct ubi_volume_desc *ubi = sb->s_fs_info; struct ubifs_info *c; struct inode *root; int err; c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); if (!c) return -ENOMEM; spin_lock_init(&c->cnt_lock); spin_lock_init(&c->cs_lock); spin_lock_init(&c->buds_lock); spin_lock_init(&c->space_lock); spin_lock_init(&c->orphan_lock); init_rwsem(&c->commit_sem); mutex_init(&c->lp_mutex); mutex_init(&c->tnc_mutex); mutex_init(&c->log_mutex); mutex_init(&c->mst_mutex); mutex_init(&c->umount_mutex); init_waitqueue_head(&c->cmt_wq); c->buds = RB_ROOT; c->old_idx = RB_ROOT; c->size_tree = RB_ROOT; c->orph_tree = RB_ROOT; INIT_LIST_HEAD(&c->infos_list); INIT_LIST_HEAD(&c->idx_gc); INIT_LIST_HEAD(&c->replay_list); INIT_LIST_HEAD(&c->replay_buds); INIT_LIST_HEAD(&c->uncat_list); INIT_LIST_HEAD(&c->empty_list); INIT_LIST_HEAD(&c->freeable_list); INIT_LIST_HEAD(&c->frdi_idx_list); INIT_LIST_HEAD(&c->unclean_leb_list); INIT_LIST_HEAD(&c->old_buds); INIT_LIST_HEAD(&c->orph_list); INIT_LIST_HEAD(&c->orph_new); c->highest_inum = UBIFS_FIRST_INO; c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; ubi_get_volume_info(ubi, &c->vi); ubi_get_device_info(c->vi.ubi_num, &c->di); /* Re-open the UBI device in read-write mode */ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READONLY); if (IS_ERR(c->ubi)) { err = PTR_ERR(c->ubi); goto out_free; } c->vfs_sb = sb; sb->s_fs_info = c; sb->s_magic = UBIFS_SUPER_MAGIC; sb->s_blocksize = UBIFS_BLOCK_SIZE; sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT; sb->s_dev = c->vi.cdev; sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c); if (c->max_inode_sz > MAX_LFS_FILESIZE) sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; if (c->rw_incompat) { ubifs_err("the file-system is not R/W-compatible"); ubifs_msg("on-flash format version is w%d/r%d, but software " "only supports up to version w%d/r%d", c->fmt_version, c->ro_compat_version, UBIFS_FORMAT_VERSION, 
UBIFS_RO_COMPAT_VERSION); return -EROFS; } mutex_lock(&c->umount_mutex); err = mount_ubifs(c); if (err) { ubifs_assert(err < 0); goto out_unlock; } /* Read the root inode */ root = ubifs_iget(sb, UBIFS_ROOT_INO); if (IS_ERR(root)) { err = PTR_ERR(root); goto out_umount; } sb->s_root = NULL; mutex_unlock(&c->umount_mutex); return 0; out_umount: ubifs_umount(c); out_unlock: mutex_unlock(&c->umount_mutex); ubi_close_volume(c->ubi); out_free: kfree(c); return err; }
/*
 * dsm_register_client - register a client with the DSM server.
 *
 * Allocates a dsm_client (struct plus dev->buff_size trailing buffer),
 * finds a free slot in the server's client list (rejecting duplicate
 * names), and publishes it under the server mutex.
 *
 * Returns the new client, or NULL if the server is uninitialised, dev is
 * NULL, allocation fails, the list is full, or the name already exists.
 */
struct dsm_client *dsm_register_client (struct dsm_dev *dev)
{
	int i;
	int size;
	int conflict = -1;
	struct dsm_client *ptr = NULL;

	if(g_dsm_server.server_state != DSM_SERVER_INITED){
		DSM_LOG_ERR("dsm server uninited\n");
		goto out;
	}
	if(dev == NULL){
		DSM_LOG_ERR("dsm_dev is NULL\n");
		goto out;
	}

	/* memory barrier: pair with the smp_wmb() after publication below */
	smp_rmb();
	/* only proceed when the client list is not full */
	if(g_dsm_server.client_count < CLIENT_SIZE){
		/* allocate memory for this client (struct + message buffer) */
		ptr = (struct dsm_client *)kzalloc((sizeof(struct dsm_client)+dev->buff_size), GFP_KERNEL);
		if(!ptr){
			DSM_LOG_ERR("clients malloc failed\n");
			goto out;
		}
		mutex_lock(&g_dsm_server.mtx_lock);
		/* try to find a free slot on the server */
		for(i=0; i<CLIENT_SIZE; i++){
			/* slot is free -> use it */
			if(!test_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i]))
				break;
			/* slot in use -> check whether the same client already exists */
			conflict = strncmp(g_dsm_server.client_list[i]->client_name, dev->name, CLIENT_NAME_LEN);
			if(!conflict){
				DSM_LOG_ERR("new client %s conflict with No.%d client %s\n",
					dev->name, i, g_dsm_server.client_list[i]->client_name);
				break;
			}
		}
		/* init a client (free slot found and no name collision) */
		if(i < CLIENT_SIZE && conflict){
			size = strlen(dev->name);
			/* clamp so the kzalloc()'ed name stays NUL-terminated */
			size = (size < CLIENT_NAME_LEN) ? size : (CLIENT_NAME_LEN - 1);
			memcpy(ptr->client_name, dev->name, size);
			// need add a end symbol? size+1?
			ptr->client_id = i;
			ptr->cops = dev->fops;
			ptr->buff_size = dev->buff_size;
			init_waitqueue_head(&ptr->waitq);
			g_dsm_server.client_list[i] = ptr;
			set_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i]);
			g_dsm_server.client_count++;
			smp_wmb();
		}else{
			/* a client with the same name exists; do not register */
			DSM_LOG_ERR("clients register failed, index %d, conflict %d\n", i, conflict);
			kfree(ptr);
			ptr = NULL;
		}
		mutex_unlock(&g_dsm_server.mtx_lock);
	} else
		DSM_LOG_INFO("clients has full\n");
out:
	return ptr;
}
/*
 * mwave_init - module init for the IBM mwave/3780i DSP driver.
 *
 * Zeroes the driver state, initialises the per-IPC wait queues, then
 * brings the board up step by step: board data, resource calculation and
 * claiming, DSP enable, misc device registration and serial port
 * registration.  Progress is recorded in the state flags so that the
 * cleanup path (mwave_exit) only undoes what actually succeeded.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int __init mwave_init(void)
{
	int i;
	int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");

	memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));

	pDrvData->bBDInitialized = FALSE;
	pDrvData->bResourcesClaimed = FALSE;
	pDrvData->bDSPEnabled = FALSE;
	pDrvData->bDSPReset = FALSE;
	pDrvData->bMwaveDevRegistered = FALSE;
	pDrvData->sLine = -1;

	for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
		pDrvData->IPCs[i].bIsEnabled = FALSE;
		pDrvData->IPCs[i].bIsHere = FALSE;
		pDrvData->IPCs[i].usIntCount = 0;	/* no ints received yet */
		init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
	}

	retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd::mwave_init: Error:"
				" Failed to initialize board data\n");
		goto cleanup_error;
	}
	pDrvData->bBDInitialized = TRUE;

	retval = tp3780I_CalcResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_CalcResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to calculate resources\n");
		goto cleanup_error;
	}

	retval = tp3780I_ClaimResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_ClaimResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to claim resources\n");
		goto cleanup_error;
	}
	pDrvData->bResourcesClaimed = TRUE;

	retval = tp3780I_EnableDSP(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_EnableDSP"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to enable DSP\n");
		goto cleanup_error;
	}
	pDrvData->bDSPEnabled = TRUE;

	if (misc_register(&mwave_misc_dev) < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register misc device\n");
		goto cleanup_error;
	}
	pDrvData->bMwaveDevRegistered = TRUE;

	pDrvData->sLine = register_serial_portandirq(
		pDrvData->rBDData.rDspSettings.usUartBaseIO,
		pDrvData->rBDData.rDspSettings.usUartIrq
	);
	if (pDrvData->sLine < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register serial driver\n");
		goto cleanup_error;
	}
	/* uart is registered */

#if 0
	/* sysfs */
	memset(&mwave_device, 0, sizeof (struct device));
	snprintf(mwave_device.bus_id, BUS_ID_SIZE, "mwave");

	if (device_register(&mwave_device))
		goto cleanup_error;
	pDrvData->device_registered = TRUE;
	for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
		if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd:mwave_init: Error:"
					" Failed to create sysfs file %s\n",
					mwave_dev_attrs[i]->attr.name);
			goto cleanup_error;
		}
		pDrvData->nr_registered_attrs++;
	}
#endif

	/* SUCCESS! */
	return 0;

cleanup_error:
	PRINTK_ERROR(KERN_ERR_MWAVE
			"mwavedd::mwave_init: Error:"
			" Failed to initialize\n");
	mwave_exit(); /* clean up */
	return -EIO;
}
/*
 * Kernel thread driving software blink for the green LED: sleeps on
 * blink_wait_queue until blink_time is armed, then toggles the LED
 * current with the configured on/off intervals.
 *
 * NOTE(review): uses the legacy daemonize()/kernel_thread() API and has
 * no termination condition — the loop never exits, so the trailing
 * "return 0" is unreachable and the thread is never stopped on remove.
 */
static int pm860x_led_blink_handler_thread(void *d)
{
	struct pm860x_led *led =(struct pm860x_led *)d;
	struct task_struct *tsk = current;
	struct sched_param param = { .sched_priority = 2 };
	DEFINE_WAIT(led_blink_wait);

	/* set up thread context */
	daemonize("pm860x_led_blink_handler_thread");
	/* improve pm860x_led_blink_handler_thread priority */
	sched_setscheduler(tsk, SCHED_FIFO, &param);

	//for(;;)
	while(1) {
		/* Sleep until a blink request is armed (blink_time != 0). */
		if (0 == led->blink_time) {
			prepare_to_wait(&led->blink_wait_queue, &led_blink_wait, TASK_INTERRUPTIBLE);
			if (0 == led->blink_time)
				schedule();
			finish_wait(&led->blink_wait_queue, &led_blink_wait);
		}
		try_to_freeze();
		if(led->color_green_blink_on && led->color_green_blink_off) {
			/* One blink cycle: current on, wait, current off, wait. */
			if (led->iset) {
				pm860x_set_bits(led->i2c, led->color_green_port,
					LED_CURRENT_MASK, led->iset);
			}
			msleep(led->color_green_blink_on);
			pm860x_set_bits(led->i2c, led->color_green_port,
				LED_CURRENT_MASK, 0);
			msleep(led->color_green_blink_off);
		} else {
			/* No valid on/off timing: restore current and disarm. */
			if (led->iset) {
				pm860x_set_bits(led->i2c, led->color_green_port,
					LED_CURRENT_MASK, led->iset);
			}
			led->blink_time = 0;
		}
	}
	return 0;
}

/*
 * Deferred brightness update, run from the work queue under led->lock:
 * handles off->on (program current, continuous mode, enable blink-ctl
 * output), on->off (zero current), then writes the PWM value.
 */
static void pm860x_led_work(struct work_struct *work)
{
	struct pm860x_led *led;
	struct pm860x_chip *chip;
	int mask;

	led = container_of(work, struct pm860x_led, work);
	chip = led->chip;
	mutex_lock(&led->lock);
	if ((led->current_brightness == 0) && led->brightness) {
		if (led->iset) {
			pm860x_set_bits(led->i2c, __led_off(led->port),
				LED_CURRENT_MASK, led->iset);
		}
		pm860x_set_bits(led->i2c, __blink_off(led->port),
			LED_BLINK_MASK, LED_ON_CONTINUOUS);
		mask = __blink_ctl_mask(led->port);
		pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
	} else if (led->brightness == 0) {
		pm860x_set_bits(led->i2c, __led_off(led->port),
			LED_CURRENT_MASK, 0);
		mask = __blink_ctl_mask(led->port);
		//pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
	}
	pm860x_set_bits(led->i2c, __led_off(led->port), LED_PWM_MASK,
		led->brightness);
	led->current_brightness = led->brightness;
	dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
		__led_off(led->port), led->brightness);
	mutex_unlock(&led->lock);
}

/* led_classdev blink_set hook. */
static int pm860x_blink_set(struct led_classdev *cdev,
	unsigned long *delay_on, unsigned long *delay_off)
{
	struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);

	/* NOTE(review): this early return makes everything below
	 * unreachable — software blink is effectively disabled here.
	 * Confirm whether this is a deliberate kill-switch. */
	return 1;
	switch (data->port) {
	case PM8606_LED1_GREEN:
		data->color_green_blink_on = *delay_on;
		data->color_green_blink_off = *delay_off;
		data->color_green_port = __led_off(data->port);
		break;
	default:
		return 1;
	}
	data->blink_time = 1;
	wake_up_interruptible(&data->blink_wait_queue);
	return 1;
}

/* led_classdev brightness_set hook: record the value (low 3 bits dropped
 * before it is written through LED_PWM_MASK) and schedule the worker. */
static void pm860x_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);

	data->brightness = value >> 3;
	if(cdev->flags & LED_SUSPENDED) {
		pm860x_led_suspend(data);
		//return;
	}
	schedule_work(&data->work);
}

/*
 * Scan the platform-data table for an LED entry whose flags index names
 * this device; return that index (the port), or -EINVAL if not found or
 * the table is malformed.
 */
static int __check_device(struct pm860x_led_pdata *pdata, char *name)
{
	struct pm860x_led_pdata *p = pdata;
	int ret = -EINVAL;

	while (p && p->id) {
		if ((p->id != PM8606_ID_LED) || (p->flags < 0))
			break;
		if (!strncmp(name, pm860x_led_name[p->flags], MFD_NAME_SIZE)) {
			ret = (int)p->flags;
			break;
		}
		p++;
	}
	return ret;
}

/*
 * Platform probe: resolve platform data and the backing I2C client,
 * initialise the work/waitqueue/blink thread and register the LED class
 * device.  Returns 0 on success or a negative errno.
 */
static int pm860x_led_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_platform_data *pm860x_pdata;
	struct pm860x_led_pdata *pdata;
	struct pm860x_led *data;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "No I/O resource!\n");
		return -EINVAL;
	}
	if (pdev->dev.parent->platform_data) {
		pm860x_pdata = pdev->dev.parent->platform_data;
		pdata = pm860x_pdata->led;
	} else {
		dev_err(&pdev->dev, "No platform data!\n");
		return -EINVAL;
	}
	data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;
	strncpy(data->name, res->name, MFD_NAME_SIZE-1);
	dev_set_drvdata(&pdev->dev, data);
	data->chip = chip;
	/* PM8606 owns the LED registers directly; otherwise use companion. */
	data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
	data->iset = pdata->iset;
	data->port = __check_device(pdata, data->name);
	if (data->port < 0) {
		ret = -EINVAL;
		goto out;
	}
	data->current_brightness = 0;
	data->cdev.name = data->name;
	data->cdev.brightness_set = pm860x_led_set;
	data->cdev.blink_set = pm860x_blink_set;
	mutex_init(&data->lock);
	INIT_WORK(&data->work, pm860x_led_work);
	init_waitqueue_head(&data->blink_wait_queue);
	/* NOTE(review): kernel_thread() result is unchecked and the thread
	 * is never stopped in pm860x_led_remove(). */
	kernel_thread(pm860x_led_blink_handler_thread, data, 0);
	data->blink_time = 0;
	ret = led_classdev_register(chip->dev, &data->cdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
		goto out;
	}
	return 0;
out:
	kfree(data);
	return ret;
}

/* Platform remove: unregister the LED class device and free state. */
static int pm860x_led_remove(struct platform_device *pdev)
{
	struct pm860x_led *data = platform_get_drvdata(pdev);

	led_classdev_unregister(&data->cdev);
	kfree(data);
	return 0;
}

static struct platform_driver pm860x_led_driver = {
	.driver = {
		.name = "88pm860x-led",
		.owner = THIS_MODULE,
	},
	.probe = pm860x_led_probe,
	.remove = pm860x_led_remove,
};
/*
 * aodv - main loop of the AODV routing kernel thread.
 *
 * Sleeps on aodv_wait until woken, then drains the task queue,
 * dispatching each task by type to the matching protocol handler.
 * Exits only when kill_thread is set, flagging aodv_is_dead on the way
 * out so the unloader knows the thread has finished.
 */
void aodv(void)
{
	//The queue holding all the events to be dealt with
	task *tmp_task;

	//Initalize the variables
	init_waitqueue_head(&aodv_wait);
	atomic_set(&kill_thread, 0);
	atomic_set(&aodv_is_dead, 0);

	//Name our thread
	/*lock_kernel();
	sprintk(current->comm, "aodv-mcc");
	//exit_mm(current); --> equivale a: current->mm = NULL; //we are in a kthread? aren't we?
	unlock_kernel();*/

	//add_wait_queue_exclusive(event_socket->sk->sleep,&(aodv_wait));
	// add_wait_queue(&(aodv_wait),event_socket->sk->sleep);

	//why would I ever want to stop ? :)
	for (;;) {
		//should the thread exit?
		if (atomic_read(&kill_thread)) {
			goto exit;
		}
		//goto sleep until we recieve an interupt
		/* NOTE(review): interruptible_sleep_on() is inherently racy
		 * (later removed from mainline) — a wakeup between the
		 * kill_thread check above and the sleep is lost. */
		interruptible_sleep_on(&aodv_wait);
		//should the thread exit?
		if (atomic_read(&kill_thread)) {
			goto exit;
		}
		//While the buffer is not empty
		while ((tmp_task = get_task()) != NULL) {
			u_int32_t dst;
			//takes a different action depending on what type of event is recieved
			switch (tmp_task->type) {
			//remove following case when DTN hell test end
			case TASK_DTN_HELLO:
				inet_aton("127.127.127.127",&dst);
				//extern u_int32_t dtn_hello_ip;
				gen_rreq(g_mesh_ip,dst,tmp_task->tos);
#ifdef CaiDebug
				printk("-------DTN HELLO TASK---------\n");
#endif
				//insert_timer_simple(TASK_DTN_HELLO, 300*HELLO_INTERVAL, g_mesh_ip);
				//update_timer_queue();
				break;
			//RREP
			case TASK_RECV_RREP:
				recv_rrep(tmp_task);
				kfree(tmp_task->data);
				break;
			//RERR
			case TASK_RECV_RERR:
				//printk("-----------\nget RERR from %s----------\n",inet_ntoa(tmp_task->src_ip));
				recv_rerr(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_RECV_HELLO:
				//printk("get HELLO from %s\n",inet_ntoa(tmp_task->src_ip));
				recv_hello(tmp_task);
				kfree(tmp_task->data);
				break;
			/* Tasks for handling received path-recovery packets
			 * (translated from the original Chinese comment). */
#ifdef RECOVERYPATH
			case TASK_RECV_RCVP:
				//printk("Receive a RCVP\n");
				recv_rcvp(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_RECV_RRDP:
				//printk("Receive a RRDP\n");
				recv_rrdp(tmp_task);
				kfree(tmp_task->data);
				break;
#endif
			//Cleanup the Route Table and Flood ID queue
			case TASK_CLEANUP:
				flush_aodv_route_table();
				break;
			case TASK_HELLO:
				//printk("gen HELLO\n");
				gen_hello();
				break;
			case TASK_ST:
				gen_st_rreq();
				break;
			case TASK_GW_CLEANUP:
				update_gw_lifetimes();
				insert_timer_simple(TASK_GW_CLEANUP, ACTIVE_GWROUTE_TIMEOUT, g_mesh_ip);
				update_timer_queue();
				break;
			case TASK_NEIGHBOR:
				//printk("get NEIGHBOR TASH,delete neigh %s\n",inet_ntoa(tmp_task->src_ip));
				delete_aodv_neigh(tmp_task->src_ip);
				break;
			case TASK_ROUTE_CLEANUP:
				flush_aodv_route_table();
				break;
			case TASK_SEND_ETT:
				send_probe(tmp_task->dst_ip);
				break;
			//A small probe packet is received
			case TASK_RECV_S_ETT:
				recv_sprobe(tmp_task);
				kfree(tmp_task->data);
				break;
			//A large probe packet is received
			case TASK_RECV_L_ETT:
				recv_lprobe(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_ETT_CLEANUP:
				reset_ett(find_aodv_neigh(tmp_task->src_ip));
				printk("Reseting ETT-Info from neighbour %s\n", inet_ntoa(tmp_task->src_ip));
				break;
			case TASK_NEIGHBOR_2H:
				delete_aodv_neigh_2h(tmp_task->src_ip);
				break;
			case TASK_RECV_RREQ:
				recv_rreq(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_RESEND_RREQ:
				resend_rreq(tmp_task);
				break;
			case TASK_ETT_INFO:
				recv_ett_info(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_SEND_RREP:
				gen_rrep(tmp_task->src_ip, tmp_task->dst_ip, tmp_task->tos);
				break;
			case TASK_RECV_STRREQ:
				recv_rreq_st(tmp_task);
				kfree(tmp_task->data);
				break;
			case TASK_UPDATE_LOAD:
				update_my_load();
				break;
			case TASK_GEN_RREQ:
				gen_rreq(tmp_task->src_ip, tmp_task->dst_ip, tmp_task->tos);
				break;
			default:
				break;
			}
			kfree(tmp_task);
		}
	}
exit:
	//Set the flag that shows you are dead
	atomic_set(&aodv_is_dead, 1);
}
/*
 * serial_m3110_probe - SPI probe for the MAX3110/3111 UART.
 *
 * Configures the SPI link, sanity-checks the chip by reading its
 * configuration register, allocates the console transmit page, starts the
 * main worker thread and registers the UART port with serial_core.
 *
 * Returns 0 on success or a negative errno; error paths release
 * everything acquired so far.
 */
static int __devinit serial_m3110_probe(struct spi_device *spi)
{
	struct uart_max3110 *max;
	void *buffer;
	u16 res;
	int ret = 0;

	max = kzalloc(sizeof(*max), GFP_KERNEL);
	if (!max)
		return -ENOMEM;

	/* Set spi info */
	spi->bits_per_word = 16;
	max->clock = MAX3110_HIGH_CLK;
	spi_setup(spi);

	max->port.type = PORT_MAX3100;
	max->port.fifosize = 2;		/* Only have 16b buffer */
	max->port.ops = &serial_m3110_ops;
	max->port.line = 0;
	max->port.dev = &spi->dev;
	max->port.uartclk = 115200;

	max->spi = spi;
	/* NOTE(review): assumes max->name is large enough for any modalias */
	strcpy(max->name, spi->modalias);
	max->irq = (u16)spi->irq;

	mutex_init(&max->thread_mutex);

	max->word_7bits = 0;
	max->parity = 0;
	max->baud = 0;

	max->cur_conf = 0;
	max->uart_flags = 0;

	/* Check if reading configuration register returns something sane */
	res = RC_TAG;
	ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
	if (ret < 0 || res == 0 || res == 0xffff) {
		dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)", res);
		ret = -ENODEV;
		goto err_get_page;
	}

	buffer = (void *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_get_page;
	}
	max->con_xmit.buf = buffer;
	max->con_xmit.head = 0;
	max->con_xmit.tail = 0;

	init_waitqueue_head(&max->wq);

	max->main_thread = kthread_run(max3110_main_thread, max, "max3110_main");
	if (IS_ERR(max->main_thread)) {
		ret = PTR_ERR(max->main_thread);
		goto err_kthread;
	}

	spi_set_drvdata(spi, max);
	pmax = max;

	/* Give membase a pseudo value to pass serial_core's check */
	max->port.membase = (void *)0xff110000;
	/* BUGFIX: uart_add_one_port()'s return value used to be ignored,
	 * leaving a half-initialized driver behind on failure. */
	ret = uart_add_one_port(&serial_m3110_reg, &max->port);
	if (ret)
		goto err_add_port;

	return 0;

err_add_port:
	pmax = NULL;
	spi_set_drvdata(spi, NULL);
	kthread_stop(max->main_thread);
err_kthread:
	free_page((unsigned long)buffer);
err_get_page:
	kfree(max);
	return ret;
}
static int g2d_probe(struct platform_device *pdev) { struct g2d_dev *dev; struct video_device *vfd; struct resource *res; const struct of_device_id *of_id; int ret = 0; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->ctrl_lock); mutex_init(&dev->mutex); atomic_set(&dev->num_inst, 0); init_waitqueue_head(&dev->irq_queue); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dev->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dev->regs)) return PTR_ERR(dev->regs); dev->clk = clk_get(&pdev->dev, "sclk_fimg2d"); if (IS_ERR(dev->clk)) { dev_err(&pdev->dev, "failed to get g2d clock\n"); return -ENXIO; } ret = clk_prepare(dev->clk); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock\n"); goto put_clk; } dev->gate = clk_get(&pdev->dev, "fimg2d"); if (IS_ERR(dev->gate)) { dev_err(&pdev->dev, "failed to get g2d clock gate\n"); ret = -ENXIO; goto unprep_clk; } ret = clk_prepare(dev->gate); if (ret) { dev_err(&pdev->dev, "failed to prepare g2d clock gate\n"); goto put_clk_gate; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "failed to find IRQ\n"); ret = -ENXIO; goto unprep_clk_gate; } dev->irq = res->start; ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr, 0, pdev->name, dev); if (ret) { dev_err(&pdev->dev, "failed to install IRQ\n"); goto put_clk_gate; } dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(dev->alloc_ctx)) { ret = PTR_ERR(dev->alloc_ctx); goto unprep_clk_gate; } ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto alloc_ctx_cleanup; vfd = video_device_alloc(); if (!vfd) { v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_v4l2_dev; } *vfd = g2d_videodev; vfd->lock = &dev->mutex; vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto rel_vdev; } 
video_set_drvdata(vfd, dev); snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name); dev->vfd = vfd; v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto unreg_video_dev; } def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3; if (!pdev->dev.of_node) { dev->variant = g2d_get_drv_data(pdev); } else { of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node); if (!of_id) { ret = -ENODEV; goto unreg_video_dev; } dev->variant = (struct g2d_variant *)of_id->data; } return 0; unreg_video_dev: video_unregister_device(dev->vfd); rel_vdev: video_device_release(vfd); unreg_v4l2_dev: v4l2_device_unregister(&dev->v4l2_dev); alloc_ctx_cleanup: vb2_dma_contig_cleanup_ctx(dev->alloc_ctx); unprep_clk_gate: clk_unprepare(dev->gate); put_clk_gate: clk_put(dev->gate); unprep_clk: clk_unprepare(dev->clk); put_clk: clk_put(dev->clk); return ret; }
/*
 * adv7610_video_probe() - I2C probe for the ADV7610 HDMI receiver.
 *
 * Initializes the shared sensor state, requests the reset GPIO and CSI
 * port index from the device tree, initializes the hardware, and
 * registers the v4l2-int slave device.
 *
 * Returns 0 on success or a negative errno.
 */
static int adv7610_video_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct sensor *sens = &adv7610_data;
	struct device *dev = &client->dev;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	init_waitqueue_head(&i2c_wait);

	/* Set initial values for the sensor struct. */
	memset(sens, 0, sizeof(adv7610_data));
	sens->sen.streamcap.timeperframe.denominator = 0;
	sens->sen.streamcap.timeperframe.numerator = 0;
	sens->sen.pix.width = 0;
	sens->sen.pix.height = 0;
#ifdef USE_16BIT
	sens->sen.pix.pixelformat = V4L2_PIX_FMT_YUYV;
#else
	sens->sen.pix.pixelformat = V4L2_PIX_FMT_UYVY;	/* YUV422 */
#endif
	/* NOTE(review): looks like leftover debug output at error level */
	pr_err("******** pixelformat=%d ************\n",sens->sen.pix.pixelformat);
	sens->sen.on = true;

	/* request reset pin */
	rst_gpio = of_get_named_gpio(dev->of_node, "reset-gpio", 0);
	if (!gpio_is_valid(rst_gpio)) {
		dev_warn(dev, "no sensor reset pin available");
		return -EINVAL;
	}
	/* drive reset high (deasserted) while claiming the GPIO */
	ret = devm_gpio_request_one(dev, rst_gpio, GPIOF_OUT_INIT_HIGH,
					"adv7610_reset");
	if (ret < 0)
		return ret;

	/* which IPU CSI port this sensor feeds */
	ret = of_property_read_u32(dev->of_node, "csi_id", &(sens->sen.csi));
	if (ret) {
		dev_err(dev, "csi_id invalid\n");
		return ret;
	}
	dev_dbg(dev, "IPU1_CSI%d\n", sens->sen.csi);

	dev_dbg(dev, "type is %d (expect %d)\n",
		adv7610_int_device.type, v4l2_int_type_slave);
	dev_dbg(dev, "num ioctls is %d\n",
		adv7610_int_device.u.slave->num_ioctls);

	//Attempt to connect to the device
	ret = adv7610_hw_init(client);
	if(ret == -ENODEV)
		return ret;

	/* This function attaches this structure to the /dev/video<n> device */
	adv7610_int_device.priv = sens;
	ret = v4l2_int_device_register(&adv7610_int_device);

	return ret;
}
/*
 * ilo_open() - open a channel control block (CCB) for an iLO minor.
 *
 * The first opener of a slot allocates and verifies a new CCB and
 * publishes it in hw->ccb_alloc[slot]; later openers share that CCB by
 * bumping its refcount, unless either side requested O_EXCL.
 *
 * Locking: hw->open_lock serializes open/close against each other;
 * hw->alloc_lock (irqsave) guards the ccb_alloc[] table against the
 * interrupt path while the CCB is published or retracted.
 *
 * Returns 0 on success, -ENOMEM/-EBUSY or a setup/verify errno.
 */
static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {
			/* retract the published CCB before tearing it down */
			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		/* slot already open: the speculative allocation is unused */
		kfree(data);

		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}
static int fsl_lbc_ctrl_probe(struct platform_device *dev) { int ret; if (!dev->dev.of_node) { dev_err(&dev->dev, "Device OF-Node is NULL"); return -EFAULT; } fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL); if (!fsl_lbc_ctrl_dev) return -ENOMEM; dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev); spin_lock_init(&fsl_lbc_ctrl_dev->lock); init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait); fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); if (!fsl_lbc_ctrl_dev->regs) { dev_err(&dev->dev, "failed to get memory region\n"); ret = -ENODEV; goto err; } fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0); if (!fsl_lbc_ctrl_dev->irq[0]) { dev_err(&dev->dev, "failed to get irq resource\n"); ret = -ENODEV; goto err; } fsl_lbc_ctrl_dev->dev = &dev->dev; ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node); if (ret < 0) goto err; ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0, "fsl-lbc", fsl_lbc_ctrl_dev); if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_lbc_ctrl_dev->irq[0]); ret = fsl_lbc_ctrl_dev->irq[0]; goto err; } fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1); if (fsl_lbc_ctrl_dev->irq[1]) { ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq, IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev); if (ret) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_lbc_ctrl_dev->irq[1]); ret = fsl_lbc_ctrl_dev->irq[1]; goto err1; } } /* Enable interrupts for any detected events */ out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); return 0; err1: free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev); err: iounmap(fsl_lbc_ctrl_dev->regs); kfree(fsl_lbc_ctrl_dev); fsl_lbc_ctrl_dev = NULL; return ret; }
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata) { struct iwch_dev *rhp; struct iwch_cq *chp; struct iwch_create_cq_resp uresp; struct iwch_create_cq_req ureq; struct iwch_ucontext *ucontext = NULL; static int warned; size_t resplen; PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); rhp = to_iwch_dev(ibdev); chp = kzalloc(sizeof(*chp), GFP_KERNEL); if (!chp) return ERR_PTR(-ENOMEM); if (ib_context) { ucontext = to_iwch_ucontext(ib_context); if (!t3a_device(rhp)) { if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) { kfree(chp); return ERR_PTR(-EFAULT); } chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr; } } if (t3a_device(rhp)) { /* * T3A: Add some fluff to handle extra CQEs inserted * for various errors. * Additional CQE possibilities: * TERMINATE, * incoming RDMA WRITE Failures * incoming RDMA READ REQUEST FAILUREs * NOTE: We cannot ensure the CQ won't overflow. 
*/ entries += 16; } entries = roundup_pow_of_two(entries); chp->cq.size_log2 = ilog2(entries); if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { kfree(chp); return ERR_PTR(-ENOMEM); } chp->rhp = rhp; chp->ibcq.cqe = 1 << chp->cq.size_log2; spin_lock_init(&chp->lock); spin_lock_init(&chp->comp_handler_lock); atomic_set(&chp->refcnt, 1); init_waitqueue_head(&chp->wait); if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); kfree(chp); return ERR_PTR(-ENOMEM); } if (ucontext) { struct iwch_mm_entry *mm; mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) { iwch_destroy_cq(&chp->ibcq); return ERR_PTR(-ENOMEM); } uresp.cqid = chp->cq.cqid; uresp.size_log2 = chp->cq.size_log2; spin_lock(&ucontext->mmap_lock); uresp.key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); mm->key = uresp.key; mm->addr = virt_to_phys(chp->cq.queue); if (udata->outlen < sizeof uresp) { if (!warned++) printk(KERN_WARNING MOD "Warning - " "downlevel libcxgb3 (non-fatal).\n"); mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * sizeof(struct t3_cqe)); resplen = sizeof(struct iwch_create_cq_resp_v0); } else { mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * sizeof(struct t3_cqe)); uresp.memsize = mm->len; resplen = sizeof uresp; } if (ib_copy_to_udata(udata, &uresp, resplen)) { kfree(mm); iwch_destroy_cq(&chp->ibcq); return ERR_PTR(-EFAULT); } insert_mmap(ucontext, mm); } PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n", chp->cq.cqid, chp, (1 << chp->cq.size_log2), (unsigned long long) chp->cq.dma_addr); return &chp->ibcq; }
static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_yurex *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int retval = -ENOMEM; int i; DEFINE_WAIT(wait); /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { err("Out of memory"); goto error; } kref_init(&dev->kref); mutex_init(&dev->io_mutex); spin_lock_init(&dev->lock); init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* set up the endpoint information */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { dev->int_in_endpointAddr = endpoint->bEndpointAddress; break; } } if (!dev->int_in_endpointAddr) { retval = -ENODEV; err("Could not find endpoints"); goto error; } /* allocate control URB */ dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->cntl_urb) { err("Could not allocate control URB"); goto error; } /* allocate buffer for control req */ dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, GFP_KERNEL, &dev->cntl_urb->setup_dma); if (!dev->cntl_req) { err("Could not allocate cntl_req"); goto error; } /* allocate buffer for control msg */ dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, GFP_KERNEL, &dev->cntl_urb->transfer_dma); if (!dev->cntl_buffer) { err("Could not allocate cntl_buffer"); goto error; } /* configure control URB */ dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; dev->cntl_req->bRequest = HID_REQ_SET_REPORT; dev->cntl_req->wValue = cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8); dev->cntl_req->wIndex = cpu_to_le16(iface_desc->desc.bInterfaceNumber); dev->cntl_req->wLength = cpu_to_le16(YUREX_BUF_SIZE); usb_fill_control_urb(dev->cntl_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void 
*)dev->cntl_req, dev->cntl_buffer, YUREX_BUF_SIZE, yurex_control_callback, dev); dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* allocate interrupt URB */ dev->urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb) { err("Could not allocate URB"); goto error; } /* allocate buffer for interrupt in */ dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, GFP_KERNEL, &dev->urb->transfer_dma); if (!dev->int_buffer) { err("Could not allocate int_buffer"); goto error; } /* configure interrupt URB */ usb_fill_int_urb(dev->urb, dev->udev, usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, dev, 1); dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (usb_submit_urb(dev->urb, GFP_KERNEL)) { retval = -EIO; err("Could not submitting URB"); goto error; } /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &yurex_class); if (retval) { err("Not able to get a minor for this device."); usb_set_intfdata(interface, NULL); goto error; } dev->bbu = -1; dev_info(&interface->dev, "USB YUREX device now attached to Yurex #%d\n", interface->minor); return 0; error: if (dev) /* this frees allocated memory */ kref_put(&dev->kref, yurex_delete); return retval; }