/**
 * n_hdlc_buf_list_init - initialize specified HDLC buffer list
 * @list: pointer to buffer list
 */
static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
{
	memset(list, 0, sizeof(*list));
	spin_lock_init(&list->spinlock);
}	/* end of n_hdlc_buf_list_init() */
/*===========================================================================
METHOD:
   GobiUSBNetProbe (Public Method)

DESCRIPTION:
   Run usbnet_probe
   Setup QMI device

PARAMETERS
   pIntf          [ I ] - Pointer to interface
   pVIDPIDs       [ I ] - Pointer to VID/PID table

RETURN VALUE:
   int - 0 for success
         Negative errno for error
===========================================================================*/
int GobiUSBNetProbe(
   struct usb_interface *        pIntf,
   const struct usb_device_id *  pVIDPIDs )
{
   int status;
   struct usbnet * pDev;
   sGobiUSBNet * pGobiDev;
   sEndpoints * pEndpoints;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
   struct net_device_ops * pNetDevOps;
#endif

   pEndpoints = GatherEndpoints( pIntf );
   if (pEndpoints == NULL)
   {
      return -ENODEV;
   }

   status = usbnet_probe( pIntf, pVIDPIDs );
   if (status < 0)
   {
      DBG( "usbnet_probe failed %d\n", status );
      kfree( pEndpoints );
      return status;
   }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
   pIntf->needs_remote_wakeup = 1;
#endif

#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
   pDev = usb_get_intfdata( pIntf );
#else
   pDev = (struct usbnet *)pIntf->dev.platform_data;
#endif

   if (pDev == NULL || pDev->net == NULL)
   {
      DBG( "failed to get netdevice\n" );
      usbnet_disconnect( pIntf );
      kfree( pEndpoints );
      return -ENXIO;
   }

   pGobiDev = kmalloc( sizeof( sGobiUSBNet ), GFP_KERNEL );
   if (pGobiDev == NULL)
   {
      DBG( "failed to allocate device buffers\n" );
      usbnet_disconnect( pIntf );
      kfree( pEndpoints );
      return -ENOMEM;
   }

   pDev->data[0] = (unsigned long)pGobiDev;
   pGobiDev->mpNetDev = pDev;
   pGobiDev->mpEndpoints = pEndpoints;

   // Overload PM related network functions
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
   pGobiDev->mpUSBNetOpen = pDev->net->open;
   pDev->net->open = GobiUSBNetOpen;
   pGobiDev->mpUSBNetStop = pDev->net->stop;
   pDev->net->stop = GobiUSBNetStop;
   pDev->net->hard_start_xmit = GobiUSBNetStartXmit;
   pDev->net->tx_timeout = GobiUSBNetTXTimeout;
#else
   pNetDevOps = kmalloc( sizeof( struct net_device_ops ), GFP_KERNEL );
   if (pNetDevOps == NULL)
   {
      DBG( "failed to allocate net device ops\n" );
      usbnet_disconnect( pIntf );
      return -ENOMEM;
   }

   memcpy( pNetDevOps, pDev->net->netdev_ops, sizeof( struct net_device_ops ) );

   pGobiDev->mpUSBNetOpen = pNetDevOps->ndo_open;
   pNetDevOps->ndo_open = GobiUSBNetOpen;
   pGobiDev->mpUSBNetStop = pNetDevOps->ndo_stop;
   pNetDevOps->ndo_stop = GobiUSBNetStop;
   pNetDevOps->ndo_start_xmit = GobiUSBNetStartXmit;
   pNetDevOps->ndo_tx_timeout = GobiUSBNetTXTimeout;

   pDev->net->netdev_ops = pNetDevOps;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
   memset( &(pGobiDev->mpNetDev->stats), 0, sizeof( struct net_device_stats ) );
#else
   memset( &(pGobiDev->mpNetDev->net->stats), 0, sizeof( struct net_device_stats ) );
#endif

   pGobiDev->mpIntf = pIntf;
   memset( &(pGobiDev->mMEID), '0', 14 );

   DBG( "Mac Address:\n" );
   PrintHex( &pGobiDev->mpNetDev->net->dev_addr[0], 6 );

   pGobiDev->mbQMIValid = false;
   memset( &pGobiDev->mQMIDev, 0, sizeof( sQMIDev ) );
   pGobiDev->mQMIDev.mbCdevIsInitialized = false;

   pGobiDev->mQMIDev.mpDevClass = gpClass;

   init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
   spin_lock_init( &pGobiDev->mQMIDev.mClientMemLock );

   // Default to device down
   pGobiDev->mDownReason = 0;
   GobiSetDownReason( pGobiDev, NO_NDIS_CONNECTION );
   GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );

   // Register QMI
   status = RegisterQMIDevice( pGobiDev );
   if (status != 0)
   {
      // usbnet_disconnect() will call GobiNetDriverUnbind() which will call
      // DeregisterQMIDevice() to clean up any partially created QMI device
      usbnet_disconnect( pIntf );
      return status;
   }

   // Success
   return 0;
}
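/*
 * This routine is called by the USB subsystem for each new device in the
 * system.  Check the endpoints, allocate the IrDA network device and
 * register it.
 */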
static int kingsun_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct kingsun_cb *kingsun = NULL;
	struct net_device *net = NULL;
	int ret = -ENOMEM;
	int pipe, maxp_in, maxp_out;
	__u8 ep_in;
	__u8 ep_out;

	interface = intf->cur_altsetting;
	if (interface->desc.bNumEndpoints != 2) {
		err("kingsun-sir: expected 2 endpoints, found %d",
		    interface->desc.bNumEndpoints);
		return -ENODEV;
	}

	endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
	if (!usb_endpoint_is_int_in(endpoint)) {
		err("kingsun-sir: endpoint 0 is not interrupt IN");
		return -ENODEV;
	}

	ep_in = endpoint->bEndpointAddress;
	pipe = usb_rcvintpipe(dev, ep_in);
	maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
	if (maxp_in > 255 || maxp_in <= 1) {
		err("%s: endpoint 0 has max packet size %d not in range",
		    __FILE__, maxp_in);
		return -ENODEV;
	}

	endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
	if (!usb_endpoint_is_int_out(endpoint)) {
		err("kingsun-sir: endpoint 1 is not interrupt OUT");
		return -ENODEV;
	}

	ep_out = endpoint->bEndpointAddress;
	pipe = usb_sndintpipe(dev, ep_out);
	maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));

	net = alloc_irdadev(sizeof(*kingsun));
	if (!net)
		goto err_out1;

	SET_NETDEV_DEV(net, &intf->dev);
	kingsun = netdev_priv(net);
	kingsun->irlap = NULL;
	kingsun->tx_urb = NULL;
	kingsun->rx_urb = NULL;
	kingsun->ep_in = ep_in;
	kingsun->ep_out = ep_out;
	kingsun->in_buf = NULL;
	kingsun->out_buf = NULL;
	kingsun->max_rx = (__u8)maxp_in;
	kingsun->max_tx = (__u8)maxp_out;
	kingsun->netdev = net;
	kingsun->usbdev = dev;
	kingsun->rx_buff.in_frame = FALSE;
	kingsun->rx_buff.state = OUTSIDE_FRAME;
	kingsun->rx_buff.skb = NULL;
	kingsun->receiving = 0;
	spin_lock_init(&kingsun->lock);

	kingsun->in_buf = kmalloc(kingsun->max_rx, GFP_KERNEL);
	if (!kingsun->in_buf)
		goto free_mem;

	kingsun->out_buf = kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL);
	if (!kingsun->out_buf)
		goto free_mem;

	printk(KERN_INFO "KingSun/DonShine IRDA/USB found at address %d, "
		"Vendor: %x, Product: %x\n",
		dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
		le16_to_cpu(dev->descriptor.idProduct));

	irda_init_max_qos_capabilies(&kingsun->qos);
	kingsun->qos.baud_rate.bits &= IR_9600;
	kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
	irda_qos_bits_to_value(&kingsun->qos);

	net->netdev_ops = &kingsun_ops;

	ret = register_netdev(net);
	if (ret != 0)
		goto free_mem;

	dev_info(&net->dev, "IrDA: Registered KingSun/DonShine device %s\n",
		 net->name);

	usb_set_intfdata(intf, kingsun);

	return 0;

free_mem:
	kfree(kingsun->out_buf);
	kfree(kingsun->in_buf);
	free_netdev(net);
err_out1:
	return ret;
}
/**
 * drm_gem_open - initialize GEM file-private state at device open time
 * @dev: drm device
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * advdrv_init_one - PnP probe entry: initialize the device and allocate
 * resources for it.
 *
 * @dev: points to the pci_dev device
 * @ent: points to the pci_device_id entry that matched this device
 */
static INT32S __devinit advdrv_init_one(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	private_data *privdata = NULL;
	struct semaphore *dio_sema = NULL;
	adv_device *device = NULL;
	INT32S ret;

	if ((ret = pci_enable_device(dev)) != 0) {
		KdPrint("pci_enable_device failed\n");
		return ret;
	}

	/* allocate urb sema */
	dio_sema = kmalloc(sizeof(struct semaphore), GFP_KERNEL);
	if (dio_sema == NULL) {
		return -ENOMEM;
	}
	init_MUTEX(dio_sema);

	/* initialize & zero the device structure */
	device = (adv_device *) kmalloc(sizeof(adv_device), GFP_KERNEL);
	if (device == NULL) {
		KdPrint("Could not kmalloc space for device!\n");
		kfree(dio_sema);
		return -ENOMEM;
	}
	memset(device, 0, sizeof(adv_device));

	/* alloc & initialize the private data structure */
	privdata = kmalloc(sizeof(private_data), GFP_KERNEL);
	if (!privdata) {
		kfree(device);
		kfree(dio_sema);
		return -ENOMEM;
	}
	memset(privdata, 0, sizeof(private_data));

	privdata->pci_slot = PCI_SLOT(dev->devfn);
	privdata->pci_bus = dev->bus->number;
	privdata->device_type = dev->device;	/* multi-card support for new driver */
	privdata->irq = dev->irq;
	privdata->dio_sema = dio_sema;

	printk(KERN_ERR "privdata->device = 0x%x\n", dev->device);

	switch (privdata->device_type) {
	case PCI1761:
	case PCI1762:
	case MIC3761:
		privdata->iobase = dev->resource[2].start & ~1UL;
		privdata->iolength = dev->resource[2].end - dev->resource[2].start;
		break;
	case PCI1763:
		privdata->iobase = dev->resource[0].start & ~1UL;
		privdata->iolength = dev->resource[0].end - dev->resource[0].start;
		break;
	}

	adv_process_info_header_init(&privdata->ptr_process_info);
	init_waitqueue_head(&privdata->event_wait);
	spin_lock_init(&privdata->spinlock);

	/* request I/O regions */
	if (request_region(privdata->iobase, privdata->iolength, "PCI-1761") == NULL) {
		kfree(device);
		kfree(privdata);
		kfree(dio_sema);
		KdPrint("Request region failed\n");
		return -ENXIO;
	}

	/* request irq */
	switch (privdata->device_type) {
	case PCI1761:
	case PCI1763:
	case MIC3761:
		ret = request_irq(privdata->irq, pci1761_interrupt_handler,
				  SA_SHIRQ, "adv1761", privdata);
		if (ret != 0) {
			release_region(privdata->iobase, privdata->iolength);
			kfree(device);
			kfree(privdata);
			kfree(dio_sema);
			KdPrint("Request IRQ failed\n");
			return ret;
		}
		break;
	case PCI1762:
		ret = request_irq(privdata->irq, pci1762_interrupt_handler,
				  SA_SHIRQ, "adv1762", privdata);
		if (ret != 0) {
			release_region(privdata->iobase, privdata->iolength);
			kfree(device);
			kfree(privdata);
			kfree(dio_sema);
			KdPrint("Request IRQ failed\n");
			return ret;
		}
		break;
	}

	/* support multi-card */
	switch (privdata->device_type) {
	case PCI1761:
		privdata->board_id = (INT16U) (advInp(privdata, 0x02) & 0x0f);
		advdrv_device_set_devname(device, "pci1761");
		break;
	case MIC3761:
		privdata->board_id = (INT16U) (advInp(privdata, 0x02) & 0x0f);
		advdrv_device_set_devname(device, "mic3761");
		break;
	case PCI1762:
		privdata->board_id = (INT16U) (advInp(privdata, 0x04) & 0x0f);
		advdrv_device_set_devname(device, "pci1762");
		break;
	case PCI1763:
		privdata->board_id = (INT16U) (advInp(privdata, 0x02) & 0x0f);
		advdrv_device_set_devname(device, "pci1763up");
		break;
	default:
		break;
	}

	/* link the info into the other structures */
	_ADV_SET_DEVICE_PRIVDATA(device, privdata);
	_ADV_SET_DEVICE_BOARDID(device, privdata->board_id);
	_ADV_SET_DEVICE_SLOT(device, privdata->pci_slot);
	_ADV_SET_DEVICE_IOBASE(device, privdata->iobase);
	_ADV_SET_DEVICE_IRQ(device, privdata->irq);

	pci_set_drvdata(dev, device);

	/* add device into driver list */
	ret = advdrv_add_device(&pci1761_driver, device);
	if (ret != 0) {
		release_region(privdata->iobase, privdata->iolength);
		free_irq(privdata->irq, privdata);
		kfree(device);
		kfree(privdata);
		kfree(dio_sema);
		KdPrint("Add device failed!\n");
		return ret;
	}

	printk("Add a PCI-%x device: iobase=%xh; irq=%xh; slot=%xh\n",
	       dev->device, privdata->iobase, privdata->irq,
	       privdata->pci_slot);

	return 0;
}
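/*
 * pm8xxx_vib_probe - allocate and initialise the vibrator data, put the
 * part into manual mode and register the timed_output device.
 */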
static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
{
	const struct pm8xxx_vibrator_platform_data *pdata =
						pdev->dev.platform_data;
	struct pm8xxx_vib *vib;
	u8 val;
	int rc;

	if (!pdata)
		return -EINVAL;

	if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
			pdata->level_mV > VIB_MAX_LEVEL_mV)
		return -EINVAL;

	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
	if (!vib)
		return -ENOMEM;

	vib->pdata = pdata;
	vib->level = pdata->level_mV / 100;
	vib->dev = &pdev->dev;
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL
	vib->default_level = vib->level;
	vib->request_level = vib->level;
#endif
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
	vib->min_timeout_ms = pdata->min_timeout_ms;
	vib->pre_value = 0;
#endif
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	vib->overdrive_ms = pdata->overdrive_ms;
	vib->overdrive_range_ms = pdata->overdrive_range_ms;
#endif
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
	vib->min_stop_ms = pdata->min_stop_ms;
	vib->start_tv.tv_sec = 0;
	vib->start_tv.tv_usec = 0;
	vib->stop_tv.tv_sec = 0;
	vib->stop_tv.tv_usec = 0;
#endif
	vib->max_level_mv = VIB_MAX_LEVEL_mV;
	vib->min_level_mv = VIB_MIN_LEVEL_mV;

	spin_lock_init(&vib->lock);
	INIT_WORK(&vib->work, pm8xxx_vib_update);

	hrtimer_init(&vib->vib_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vib->vib_timer.function = pm8xxx_vib_timer_func;
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	hrtimer_init(&vib->vib_overdrive_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	vib->vib_overdrive_timer.function = pm8xxx_vib_overdrive_timer_func;
#endif

	vib->timed_dev.name = "vibrator";
	vib->timed_dev.get_time = pm8xxx_vib_get_time;
	vib->timed_dev.enable = pm8xxx_vib_enable;

	__dump_vib_regs(vib, "boot_vib_default");

	/*
	 * Configure the vibrator, it operates in manual mode
	 * for timed_output framework.
	 */
	rc = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
	if (rc < 0)
		goto err_read_vib;

	val &= ~VIB_DRV_EN_MANUAL_MASK;
	rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
	if (rc < 0)
		goto err_read_vib;

	vib->reg_vib_drv = val;

	rc = timed_output_dev_register(&vib->timed_dev);
	if (rc < 0)
		goto err_read_vib;

	rc = sysfs_create_group(&vib->timed_dev.dev->kobj,
			&pm8xxx_vib_attr_group);

#if 0
#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_VOL
	rc = device_create_file(vib->timed_dev.dev, &dev_attr_amp);
	if (rc < 0)
		goto err_read_vib;

	rc = device_create_file(vib->timed_dev.dev, &dev_attr_default_level);
	if (rc < 0)
		goto err_read_vib;
#endif

#ifndef CONFIG_MACH_LGE
	pm8xxx_vib_enable(&vib->timed_dev, pdata->initial_vibrate_ms);
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_MIN_TIMEOUT
	rc = device_create_file(vib->timed_dev.dev, &dev_attr_min_ms);
	if (rc < 0)
		goto err_read_vib;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_OVERDRIVE
	rc = device_create_file(vib->timed_dev.dev, &dev_attr_over_ms);
	if (rc < 0)
		goto err_read_vib;

	rc = device_create_file(vib->timed_dev.dev, &dev_attr_over_range_ms);
	if (rc < 0)
		goto err_read_vib;
#endif

#ifdef CONFIG_LGE_PMIC8XXX_VIBRATOR_REST_POWER
	rc = device_create_file(vib->timed_dev.dev, &dev_attr_min_stop_ms);
	if (rc < 0)
		goto err_read_vib;
#endif
#endif	/* #if 0 */

	platform_set_drvdata(pdev, vib);

	vib_dev = vib;

	return 0;

err_read_vib:
	kfree(vib);
	return rc;
}
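/*
 * islpci_setup - allocate the network device and its private data, set up
 * the locks, work items and firmware name, then register the device.
 */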
struct net_device *
islpci_setup(struct pci_dev *pdev)
{
	islpci_private *priv;
	struct net_device *ndev = alloc_etherdev(sizeof(islpci_private));

	if (!ndev)
		return ndev;

	pci_set_drvdata(pdev, ndev);
#if defined(SET_NETDEV_DEV)
	SET_NETDEV_DEV(ndev, &pdev->dev);
#endif

	/* setup the structure members */
	ndev->base_addr = pci_resource_start(pdev, 0);
	ndev->irq = pdev->irq;

	/* initialize the function pointers */
	ndev->netdev_ops = &islpci_netdev_ops;
	ndev->wireless_handlers = &prism54_handler_def;
	ndev->ethtool_ops = &islpci_ethtool_ops;

	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
	ndev->addr_len = ETH_ALEN;
	/* Get a non-zero dummy MAC address for nameif. Jean II */
	memcpy(ndev->dev_addr, dummy_mac, 6);

	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;

	/* allocate a private device structure to the network device */
	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->monitor_type = ARPHRD_IEEE80211;
	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
		priv->monitor_type : ARPHRD_ETHER;

	/* Add pointers to enable iwspy support. */
	priv->wireless_data.spy_data = &priv->spy_data;
	ndev->wireless_data = &priv->wireless_data;

	/* save the start and end address of the PCI memory area */
	ndev->mem_start = (unsigned long) priv->device_base;
	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n",
	      priv->device_base);
#endif

	init_waitqueue_head(&priv->reset_done);

	/* init the queue read locks, process wait counter */
	mutex_init(&priv->mgmt_lock);
	priv->mgmt_received = NULL;
	init_waitqueue_head(&priv->mgmt_wqueue);
	mutex_init(&priv->stats_lock);
	spin_lock_init(&priv->slock);

	/* init state machine with off#1 state */
	priv->state = PRV_STATE_OFF;
	priv->state_off = 1;

	/* initialize workqueues */
	INIT_WORK(&priv->stats_work, prism54_update_stats);
	priv->stats_timestamp = 0;

	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
	priv->reset_task_pending = 0;

	/* allocate various memory areas */
	if (islpci_alloc_memory(priv))
		goto do_free_netdev;

	/* select the firmware file depending on the device id */
	switch (pdev->device) {
	case 0x3877:
		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
		break;
	case 0x3886:
		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
		break;
	default:
		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
		break;
	}

	if (register_netdev(ndev)) {
		DEBUG(SHOW_ERROR_MESSAGES,
		      "ERROR: register_netdev() failed\n");
		goto do_islpci_free_memory;
	}

	return ndev;

do_islpci_free_memory:
	islpci_free_memory(priv);
do_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
	return NULL;
}
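/*
 * gmc_v9_0_sw_init - one-time software setup: hub init, VM address space
 * sizing, page-fault interrupt registration, DMA masks, MC/VRAM and GART
 * initialisation.
 */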
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
void msm_gemini_core_init(void)
{
	init_waitqueue_head(&reset_wait);
	spin_lock_init(&reset_lock);
}
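/*
 * twl6030_usb_probe - set up the comparator hooks, pick the regulator name,
 * create the vbus sysfs file and request the two OTG/USB threaded IRQs.
 */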
static int twl6030_usb_probe(struct platform_device *pdev)
{
	int ret;
	struct twl6030_usb *twl;
	int status, err;
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct twl4030_usb_data *pdata = dev_get_platdata(dev);

	twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
	if (!twl)
		return -ENOMEM;

	twl->dev = &pdev->dev;
	twl->irq1 = platform_get_irq(pdev, 0);
	twl->irq2 = platform_get_irq(pdev, 1);
	twl->linkstat = MUSB_UNKNOWN;

	twl->comparator.set_vbus = twl6030_set_vbus;
	twl->comparator.start_srp = twl6030_start_srp;

	ret = omap_usb2_set_comparator(&twl->comparator);
	if (ret == -ENODEV) {
		dev_info(&pdev->dev, "phy not ready, deferring probe\n");
		return -EPROBE_DEFER;
	}

	if (np) {
		twl->regulator = "usb";
	} else if (pdata) {
		if (pdata->features & TWL6032_SUBCLASS)
			twl->regulator = "ldousb";
		else
			twl->regulator = "vusb";
	} else {
		dev_err(&pdev->dev, "twl6030 initialized without pdata\n");
		return -EINVAL;
	}

	/* init spinlock for workqueue */
	spin_lock_init(&twl->lock);

	err = twl6030_usb_ldo_init(twl);
	if (err) {
		dev_err(&pdev->dev, "ldo init failed\n");
		return err;
	}

	platform_set_drvdata(pdev, twl);
	if (device_create_file(&pdev->dev, &dev_attr_vbus))
		dev_warn(&pdev->dev, "could not create sysfs file\n");

	INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);

	status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"twl6030_usb", twl);
	if (status < 0) {
		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
			twl->irq1, status);
		device_remove_file(twl->dev, &dev_attr_vbus);
		return status;
	}

	status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"twl6030_usb", twl);
	if (status < 0) {
		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
			twl->irq2, status);
		free_irq(twl->irq1, twl);
		device_remove_file(twl->dev, &dev_attr_vbus);
		return status;
	}

	twl->asleep = 0;
	twl6030_enable_irq(twl);
	dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");

	return 0;
}
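/*
 * ubifs_fill_super - initialise the UBIFS info structure, re-open the UBI
 * volume, mount the file-system and read the root inode.
 */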
static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct ubi_volume_desc *ubi = sb->s_fs_info;
	struct ubifs_info *c;
	struct inode *root;
	int err;

	c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	spin_lock_init(&c->cnt_lock);
	spin_lock_init(&c->cs_lock);
	spin_lock_init(&c->buds_lock);
	spin_lock_init(&c->space_lock);
	spin_lock_init(&c->orphan_lock);
	init_rwsem(&c->commit_sem);
	mutex_init(&c->lp_mutex);
	mutex_init(&c->tnc_mutex);
	mutex_init(&c->log_mutex);
	mutex_init(&c->mst_mutex);
	mutex_init(&c->umount_mutex);
	init_waitqueue_head(&c->cmt_wq);
	c->buds = RB_ROOT;
	c->old_idx = RB_ROOT;
	c->size_tree = RB_ROOT;
	c->orph_tree = RB_ROOT;
	INIT_LIST_HEAD(&c->infos_list);
	INIT_LIST_HEAD(&c->idx_gc);
	INIT_LIST_HEAD(&c->replay_list);
	INIT_LIST_HEAD(&c->replay_buds);
	INIT_LIST_HEAD(&c->uncat_list);
	INIT_LIST_HEAD(&c->empty_list);
	INIT_LIST_HEAD(&c->freeable_list);
	INIT_LIST_HEAD(&c->frdi_idx_list);
	INIT_LIST_HEAD(&c->unclean_leb_list);
	INIT_LIST_HEAD(&c->old_buds);
	INIT_LIST_HEAD(&c->orph_list);
	INIT_LIST_HEAD(&c->orph_new);

	c->highest_inum = UBIFS_FIRST_INO;
	c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;

	ubi_get_volume_info(ubi, &c->vi);
	ubi_get_device_info(c->vi.ubi_num, &c->di);

	/* Re-open the UBI device in read-write mode */
	c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
	if (IS_ERR(c->ubi)) {
		err = PTR_ERR(c->ubi);
		goto out_free;
	}

	c->vfs_sb = sb;

	sb->s_fs_info = c;
	sb->s_magic = UBIFS_SUPER_MAGIC;
	sb->s_blocksize = UBIFS_BLOCK_SIZE;
	sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT;
	sb->s_dev = c->vi.cdev;
	sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c);
	if (c->max_inode_sz > MAX_LFS_FILESIZE)
		sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;

	if (c->rw_incompat) {
		ubifs_err("the file-system is not R/W-compatible");
		ubifs_msg("on-flash format version is w%d/r%d, but software "
			  "only supports up to version w%d/r%d",
			  c->fmt_version, c->ro_compat_version,
			  UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
		err = -EROFS;
		ubi_close_volume(c->ubi);
		goto out_free;
	}

	mutex_lock(&c->umount_mutex);
	err = mount_ubifs(c);
	if (err) {
		ubifs_assert(err < 0);
		goto out_unlock;
	}

	/* Read the root inode */
	root = ubifs_iget(sb, UBIFS_ROOT_INO);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out_umount;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_iput;
	}

	mutex_unlock(&c->umount_mutex);
	return 0;

out_iput:
	iput(root);
out_umount:
	ubifs_umount(c);
out_unlock:
	mutex_unlock(&c->umount_mutex);
	ubi_close_volume(c->ubi);
out_free:
	kfree(c);
	return err;
}
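/*
 * msm_i2c_probe - map the controller, program the clock divider and
 * register the I2C adapter together with its interrupt handler.
 */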
static int msm_i2c_probe(struct platform_device *pdev)
{
	struct msm_i2c_dev *dev;
	struct resource *mem, *irq, *ioarea;
	struct msm_i2c_device_platform_data *pdata = pdev->dev.platform_data;
	int ret;
	int fs_div;
	int hs_div;
	int i2c_clk, i2c_clock;
	int clk_ctl;
	struct clk *clk;

	printk(KERN_INFO "msm_i2c_probe\n");

	/* NOTE: driver uses the static register mapping */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}
	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1,
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "I2C region already claimed\n");
		return -EBUSY;
	}
	clk = clk_get(&pdev->dev, "i2c_clk");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Could not get clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get_failed;
	}

	dev = kzalloc(sizeof(struct msm_i2c_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_alloc_dev_failed;
	}

	dev->dev = &pdev->dev;
	dev->irq = irq->start;
	dev->clk = clk;
	dev->base = ioremap(mem->start, (mem->end - mem->start) + 1);
	if (!dev->base) {
		ret = -ENOMEM;
		goto err_ioremap_failed;
	}

	spin_lock_init(&dev->lock);
	wake_lock_init(&dev->wakelock, WAKE_LOCK_SUSPEND, "i2c");
	platform_set_drvdata(pdev, dev);

	if (pdata) {
		dev->clk_drv_str = pdata->clock_strength;
		dev->dat_drv_str = pdata->data_strength;
		if (pdata->i2c_clock < 100000 || pdata->i2c_clock > 400000)
			i2c_clock = 100000;
		else
			i2c_clock = pdata->i2c_clock;
		if (pdata->reset_slave)
			dev->reset_slave = pdata->reset_slave;
	} else {
		dev->clk_drv_str = 0;
		dev->dat_drv_str = 0;
		i2c_clock = 100000;
		dev->skip_recover = 1;
	}

	if (!dev->skip_recover)
		msm_set_i2c_mux(false, NULL, NULL,
				dev->clk_drv_str, dev->dat_drv_str);

	clk_enable(clk);

	/* I2C_HS_CLK = I2C_CLK/(3*(HS_DIVIDER_VALUE+1)) */
	/* I2C_FS_CLK = I2C_CLK/(2*(FS_DIVIDER_VALUE+3)) */
	/* FS_DIVIDER_VALUE = ((I2C_CLK / I2C_FS_CLK) / 2) - 3 */
	i2c_clk = 19200000; /* input clock */
	fs_div = ((i2c_clk / i2c_clock) / 2) - 3;
	hs_div = 3;
	clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
	writel(clk_ctl, dev->base + I2C_CLK_CTL);
	printk(KERN_INFO "msm_i2c_probe: clk_ctl %x, %d Hz\n",
	       clk_ctl, i2c_clk / (2 * ((clk_ctl & 0xff) + 3)));
	clk_disable(clk);

	i2c_set_adapdata(&dev->adapter, dev);
	dev->adapter.algo = &msm_i2c_algo;
	strncpy(dev->adapter.name,
		"MSM I2C adapter",
		sizeof(dev->adapter.name));

	dev->adapter.nr = pdev->id;
	ret = i2c_add_numbered_adapter(&dev->adapter);
	if (ret) {
		dev_err(&pdev->dev, "i2c_add_adapter failed\n");
		goto err_i2c_add_adapter_failed;
	}

	ret = request_irq(dev->irq, msm_i2c_interrupt,
			IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TIMER,
			pdev->name, dev);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_request_irq_failed;
	}

	disable_irq(dev->irq);

	return 0;

/*	free_irq(dev->irq, dev); */
err_request_irq_failed:
	i2c_del_adapter(&dev->adapter);
err_i2c_add_adapter_failed:
	iounmap(dev->base);
err_ioremap_failed:
	kfree(dev);
err_alloc_dev_failed:
	clk_put(clk);
err_clk_get_failed:
	release_mem_region(mem->start, (mem->end - mem->start) + 1);
	return ret;
}
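/*
 * s3c_pcm_dev_probe - claim the memory, DMA and clock resources for this
 * PCM controller instance and register its CPU DAI.
 */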
static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
{
	struct s3c_pcm_info *pcm;
	struct resource *mem_res, *dmatx_res, *dmarx_res;
	struct s3c_audio_pdata *pcm_pdata;
	int ret;

	/* Check for valid device index */
	if ((pdev->id < 0) || pdev->id >= ARRAY_SIZE(s3c_pcm)) {
		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
		return -EINVAL;
	}

	pcm_pdata = pdev->dev.platform_data;

	/* Check for availability of necessary resource */
	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmatx_res) {
		dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
		return -ENXIO;
	}

	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmarx_res) {
		dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
		return -ENXIO;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Unable to get register resource\n");
		return -ENXIO;
	}

	if (pcm_pdata && pcm_pdata->cfg_gpio && pcm_pdata->cfg_gpio(pdev)) {
		dev_err(&pdev->dev, "Unable to configure gpio\n");
		return -EINVAL;
	}

	pcm = &s3c_pcm[pdev->id];
	pcm->dev = &pdev->dev;

	spin_lock_init(&pcm->lock);

	/* Default is 128fs */
	pcm->sclk_per_fs = 128;

	pcm->cclk = clk_get(&pdev->dev, "audio-bus");
	if (IS_ERR(pcm->cclk)) {
		dev_err(&pdev->dev, "failed to get audio-bus\n");
		ret = PTR_ERR(pcm->cclk);
		goto err1;
	}
	clk_enable(pcm->cclk);

	/* record our pcm structure for later use in the callbacks */
	dev_set_drvdata(&pdev->dev, pcm);

	if (!request_mem_region(mem_res->start,
				resource_size(mem_res), "samsung-pcm")) {
		dev_err(&pdev->dev, "Unable to request register region\n");
		ret = -EBUSY;
		goto err2;
	}

	pcm->regs = ioremap(mem_res->start, 0x100);
	if (pcm->regs == NULL) {
		dev_err(&pdev->dev, "cannot ioremap registers\n");
		ret = -ENXIO;
		goto err3;
	}

	pcm->pclk = clk_get(&pdev->dev, "pcm");
	if (IS_ERR(pcm->pclk)) {
		dev_err(&pdev->dev, "failed to get pcm_clock\n");
		ret = -ENOENT;
		goto err4;
	}
	clk_enable(pcm->pclk);

	ret = snd_soc_register_dai(&pdev->dev, &s3c_pcm_dai[pdev->id]);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to register the DAI\n");
		goto err5;
	}

	s3c_pcm_stereo_in[pdev->id].dma_addr = mem_res->start
							+ S3C_PCM_RXFIFO;
	s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
							+ S3C_PCM_TXFIFO;

	s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
	s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;

	pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
	pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];

	return 0;

err5:
	clk_disable(pcm->pclk);
	clk_put(pcm->pclk);
err4:
	iounmap(pcm->regs);
err3:
	release_mem_region(mem_res->start, resource_size(mem_res));
err2:
	clk_disable(pcm->cclk);
	clk_put(pcm->cclk);
err1:
	return ret;
}
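/* Create a POSIX.1b interval timer. */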
asmlinkage long
sys_timer_create(const clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user *created_timer_id)
{
	int error = 0;
	struct k_itimer *new_timer = NULL;
	int new_timer_id;
	struct task_struct *process = NULL;
	unsigned long flags;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, (void *)new_timer,
			    &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error) {
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;
	error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
	if (error)
		goto out;

	/*
	 * return the timer_id now.  The next step is hard to
	 * back out if there is an error.
	 */
	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		new_timer->it_sigev_notify = event.sigev_notify;
		new_timer->it_sigev_signo = event.sigev_signo;
		new_timer->it_sigev_value = event.sigev_value;

		read_lock(&tasklist_lock);
		if ((process = good_sigevent(&event))) {
			/*
			 * We may be setting up this process for another
			 * thread.  It may be exiting.  To catch this
			 * case we check the PF_EXITING flag.  If
			 * the flag is not set, the siglock will catch
			 * him before it is too late (in exit_itimers).
			 *
			 * The exec case is a bit more involved but easy
			 * to code.  If the process is in our thread
			 * group (and it must be or we would not allow
			 * it here) and is doing an exec, it will cause
			 * us to be killed.  In this case it will wait
			 * for us to die which means we can finish this
			 * linkage with our last gasp. I.e. no code :)
			 */
			spin_lock_irqsave(&process->sighand->siglock, flags);
			if (!(process->flags & PF_EXITING)) {
				new_timer->it_process = process;
				list_add(&new_timer->list,
					 &process->signal->posix_timers);
				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
					get_task_struct(process);
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
			} else {
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				process = NULL;
			}
		}
		read_unlock(&tasklist_lock);
		if (!process) {
			error = -EINVAL;
			goto out;
		}
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->it_sigev_signo = SIGALRM;
		new_timer->it_sigev_value.sival_int = new_timer->it_id;
		process = current->group_leader;
		spin_lock_irqsave(&process->sighand->siglock, flags);
		new_timer->it_process = process;
		list_add(&new_timer->list, &process->signal->posix_timers);
		spin_unlock_irqrestore(&process->sighand->siglock, flags);
	}

	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	if (error)
		release_posix_timer(new_timer, it_id_set);

	return error;
}
/* Allocate memory for one device */
static int nozomi_card_init(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	resource_size_t start;
	int ret;
	struct nozomi *dc = NULL;
	int ndev_idx;
	int i;

	dev_dbg(&pdev->dev, "Init, new card found\n");

	for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
		if (!ndevs[ndev_idx])
			break;

	if (ndev_idx >= ARRAY_SIZE(ndevs)) {
		dev_err(&pdev->dev, "no free tty range for this card left\n");
		ret = -EIO;
		goto err;
	}

	dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
	if (unlikely(!dc)) {
		dev_err(&pdev->dev, "Could not allocate memory\n");
		ret = -ENOMEM;
		goto err_free;
	}

	dc->pdev = pdev;

	ret = pci_enable_device(dc->pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable PCI Device\n");
		goto err_free;
	}

	ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
	if (ret) {
		dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
			(int) /* nozomi_private.io_addr */ 0);
		goto err_disable_device;
	}

	start = pci_resource_start(dc->pdev, 0);
	if (start == 0) {
		dev_err(&pdev->dev, "No I/O address for card detected\n");
		ret = -ENODEV;
		goto err_rel_regs;
	}

	/* Find out what card type it is */
	nozomi_get_card_type(dc);

	dc->base_addr = ioremap_nocache(start, dc->card_type);
	if (!dc->base_addr) {
		dev_err(&pdev->dev, "Unable to map card MMIO\n");
		ret = -ENODEV;
		goto err_rel_regs;
	}

	dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
	if (!dc->send_buf) {
		dev_err(&pdev->dev, "Could not allocate send buffer?\n");
		ret = -ENOMEM;
		goto err_free_sbuf;
	}

	for (i = PORT_MDM; i < MAX_PORT; i++) {
		if (kfifo_alloc(&dc->port[i].fifo_ul, FIFO_BUFFER_SIZE_UL,
					GFP_KERNEL)) {
			dev_err(&pdev->dev,
					"Could not allocate kfifo buffer\n");
			ret = -ENOMEM;
			goto err_free_kfifo;
		}
	}

	spin_lock_init(&dc->spin_mutex);

	nozomi_setup_private_data(dc);

	/* Disable all interrupts */
	dc->last_ier = 0;
	writew(dc->last_ier, dc->reg_ier);

	ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
			NOZOMI_NAME, dc);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
		goto err_free_kfifo;
	}

	DBG1("base_addr: %p", dc->base_addr);

	make_sysfs_files(dc);

	dc->index_start = ndev_idx * MAX_PORT;
	ndevs[ndev_idx] = dc;

	pci_set_drvdata(pdev, dc);

	/* Enable RESET interrupt */
	dc->last_ier = RESET;
	iowrite16(dc->last_ier, dc->reg_ier);

	dc->state = NOZOMI_STATE_ENABLED;

	for (i = 0; i < MAX_PORT; i++) {
		struct device *tty_dev;
		struct port *port = &dc->port[i];
		port->dc = dc;
		tty_port_init(&port->port);
		port->port.ops = &noz_tty_port_ops;
		tty_dev = tty_port_register_device(&port->port, ntty_driver,
						   dc->index_start + i,
						   &pdev->dev);
		if (IS_ERR(tty_dev)) {
			ret = PTR_ERR(tty_dev);
			dev_err(&pdev->dev, "Could not allocate tty?\n");
			tty_port_destroy(&port->port);
			goto err_free_tty;
		}
	}

	return 0;

err_free_tty:
	/* unwind only the ports that were actually registered */
	while (--i >= 0) {
		tty_unregister_device(ntty_driver, dc->index_start + i);
		tty_port_destroy(&dc->port[i].port);
	}
	free_irq(pdev->irq, dc);
err_free_kfifo:
	for (i = 0; i < MAX_PORT; i++)
		kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
	kfree(dc->send_buf);
	iounmap(dc->base_addr);
err_rel_regs:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_free:
	kfree(dc);
err:
	return ret;
}
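/*
 * gtm_probe - map the general-purpose timer block, hook up its interrupt
 * and expose the timeout sysfs attribute.
 */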
static int __devinit gtm_probe(struct of_device *dev,
			       const struct of_device_id *match)
{
	struct device_node *np = dev->node;
	struct resource res;
	int ret = 0;
	u32 busfreq = fsl_get_sys_freq();
	struct gtm_priv *priv;

	if (busfreq == 0) {
		dev_err(&dev->dev, "gtm: No bus frequency in device tree.\n");
		return -ENODEV;
	}

	priv = kmalloc(sizeof(struct gtm_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);
	dev_set_drvdata(&dev->dev, priv);

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto out;

	priv->irq = irq_of_parse_and_map(np, 0);
	if (priv->irq == NO_IRQ) {
		dev_err(&dev->dev, "mpc83xx-gtm exists in device tree "
			"without an IRQ.\n");
		ret = -ENODEV;
		goto out;
	}

	ret = request_irq(priv->irq, fsl_gtm_isr, 0, "gtm timer", priv);
	if (ret)
		goto out;

	priv->regs = ioremap(res.start, sizeof(struct gtm_regs));
	if (!priv->regs) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* Disable the unused clocks to save power. */
	out_8(&priv->regs->cfr1, CFR1_STP1 | CFR1_STP2);
	out_8(&priv->regs->cfr2, CFR2_STP3 | CFR2_STP4);

	/*
	 * Maximum prescaling is used (input clock/16, 256 primary prescaler,
	 * 256 secondary prescaler) to maximize the timer's range.  With a
	 * bus clock of 133MHz, this yields a maximum interval of 516
	 * seconds while retaining subsecond precision.  Since only
	 * timer 4 is supported for wakeup on the 8313, and timer 4
	 * is the LSB when chained, we can't use chaining to increase
	 * the range.
	 */
	priv->ticks_per_sec = busfreq / (16*256*256);

	ret = device_create_file(&dev->dev, &dev_attr_timeout);
	if (ret)
		goto out_unmap;

	return 0;

out_unmap:
	iounmap(priv->regs);
out_free_irq:
	free_irq(priv->irq, priv);
out:
	kfree(priv);
	return ret;
}
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	/* MTU range: 68 - 1536 or 8000 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);

	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
			     np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
			     np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}
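/*
 * USB probe and disconnect routines.
 */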
static int acm_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct usb_cdc_union_desc *union_header = NULL;
	struct usb_cdc_country_functional_desc *cfd = NULL;
	unsigned char *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	struct usb_interface *control_interface;
	struct usb_interface *data_interface;
	struct usb_endpoint_descriptor *epctrl = NULL;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct acm *acm;
	int minor;
	int ctrlsize, readsize;
	u8 *buf;
	u8 ac_management_function = 0;
	u8 call_management_function = 0;
	int call_interface_num = -1;
	int data_interface_num = -1;
	unsigned long quirks;
	int num_rx_buf;
	int i;
	int combined_interfaces = 0;

	/* normal quirks */
	quirks = (unsigned long)id->driver_info;
	num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;

	/* handle quirks deadly to normal probing */
	if (quirks == NO_UNION_NORMAL) {
		data_interface = usb_ifnum_to_if(usb_dev, 1);
		control_interface = usb_ifnum_to_if(usb_dev, 0);
		goto skip_normal_probe;
	}

	/* normal probing */
	if (!buffer) {
		dev_err(&intf->dev, "Weird descriptor references\n");
		return -EINVAL;
	}

	if (!buflen) {
		if (intf->cur_altsetting->endpoint &&
				intf->cur_altsetting->endpoint->extralen &&
				intf->cur_altsetting->endpoint->extra) {
			dev_dbg(&intf->dev,
				"Seeking extra descriptors on endpoint\n");
			buflen = intf->cur_altsetting->endpoint->extralen;
			buffer = intf->cur_altsetting->endpoint->extra;
		} else {
			dev_err(&intf->dev,
				"Zero length descriptor references\n");
			return -EINVAL;
		}
	}

	while (buflen > 0) {
		if (buffer[1] != USB_DT_CS_INTERFACE) {
			dev_err(&intf->dev, "skipping garbage\n");
			goto next_desc;
		}

		switch (buffer[2]) {
		case USB_CDC_UNION_TYPE: /* we've found it */
			if (union_header) {
				dev_err(&intf->dev, "More than one "
					"union descriptor, skipping ...\n");
				goto next_desc;
			}
			union_header = (struct usb_cdc_union_desc *)buffer;
			break;
		case USB_CDC_COUNTRY_TYPE: /* export through sysfs */
			cfd = (struct usb_cdc_country_functional_desc *)buffer;
			break;
		case USB_CDC_HEADER_TYPE: /* maybe check version */
			break; /* for now we ignore it */
		case USB_CDC_ACM_TYPE:
			ac_management_function = buffer[3];
			break;
		case USB_CDC_CALL_MANAGEMENT_TYPE:
			call_management_function = buffer[3];
			call_interface_num = buffer[4];
			if ((quirks & NOT_A_MODEM) == 0 &&
					(call_management_function & 3) != 3)
				dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
			break;
		default:
			/*
			 * there are LOTS more CDC descriptors that
			 * could legitimately be found here.
			 */
			dev_dbg(&intf->dev, "Ignoring descriptor: "
					"type %02x, length %d\n",
					buffer[2], buffer[0]);
			break;
		}
next_desc:
		buflen -= buffer[0];
		buffer += buffer[0];
	}

	if (!union_header) {
		if (call_interface_num > 0) {
			dev_dbg(&intf->dev,
				"No union descriptor, using call management descriptor\n");
			/* quirks for Droids MuIn LCD */
			if (quirks & NO_DATA_INTERFACE)
				data_interface = usb_ifnum_to_if(usb_dev, 0);
			else
				data_interface = usb_ifnum_to_if(usb_dev,
						(data_interface_num = call_interface_num));
			control_interface = intf;
		} else {
			if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
				dev_dbg(&intf->dev,
					"No union descriptor, giving up\n");
				return -ENODEV;
			} else {
				dev_warn(&intf->dev,
					 "No union descriptor, testing for castrated device\n");
				combined_interfaces = 1;
				control_interface = data_interface = intf;
				goto look_for_collapsed_interface;
			}
		}
	} else {
		control_interface = usb_ifnum_to_if(usb_dev,
				union_header->bMasterInterface0);
		data_interface = usb_ifnum_to_if(usb_dev,
				(data_interface_num = union_header->bSlaveInterface0));
		if (!control_interface || !data_interface) {
			dev_dbg(&intf->dev, "no interfaces\n");
			return -ENODEV;
		}
	}

	if (data_interface_num != call_interface_num)
		dev_dbg(&intf->dev,
			"Separate call control interface. That is not fully supported.\n");

	if (control_interface == data_interface) {
		/* some broken devices designed for windows work this way */
		dev_warn(&intf->dev,
			 "Control and data interfaces are not separated!\n");
		combined_interfaces = 1;
		/* a popular other OS doesn't use it */
		quirks |= NO_CAP_LINE;
		if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
			dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
			return -EINVAL;
		}
look_for_collapsed_interface:
		for (i = 0; i < 3; i++) {
			struct usb_endpoint_descriptor *ep;
			ep = &data_interface->cur_altsetting->endpoint[i].desc;

			if (usb_endpoint_is_int_in(ep))
				epctrl = ep;
			else if (usb_endpoint_is_bulk_out(ep))
				epwrite = ep;
			else if (usb_endpoint_is_bulk_in(ep))
				epread = ep;
			else
				return -EINVAL;
		}
		if (!epctrl || !epread || !epwrite)
			return -ENODEV;
		else
			goto made_compressed_probe;
	}

skip_normal_probe:

	/* workaround for switched interfaces */
	if (data_interface->cur_altsetting->desc.bInterfaceClass !=
			CDC_DATA_INTERFACE_TYPE) {
		if (control_interface->cur_altsetting->desc.bInterfaceClass ==
				CDC_DATA_INTERFACE_TYPE) {
			struct usb_interface *t;
			dev_dbg(&intf->dev,
				"Your device has switched interfaces.\n");
			t = control_interface;
			control_interface = data_interface;
			data_interface = t;
		} else {
			return -EINVAL;
		}
	}

	/* Accept probe requests only for the control interface */
	if (!combined_interfaces && intf != control_interface)
		return -ENODEV;

	if (!combined_interfaces && usb_interface_claimed(data_interface)) {
		/* valid in this context */
		dev_dbg(&intf->dev, "The data interface isn't available\n");
		return -EBUSY;
	}

	if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
	    control_interface->cur_altsetting->desc.bNumEndpoints == 0)
		return -EINVAL;

	epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
	epread = &data_interface->cur_altsetting->endpoint[0].desc;
	epwrite = &data_interface->cur_altsetting->endpoint[1].desc;

	/* workaround for switched endpoints */
	if (!usb_endpoint_dir_in(epread)) {
		/* descriptors are swapped */
		struct usb_endpoint_descriptor *t;
		dev_dbg(&intf->dev,
			"The data interface has switched endpoints\n");
		t = epread;
		epread = epwrite;
		epwrite = t;
	}
made_compressed_probe:
	dev_dbg(&intf->dev, "interfaces are valid\n");

	acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
	if (acm == NULL) {
		dev_err(&intf->dev, "out of memory (acm kzalloc)\n");
		goto alloc_fail;
	}

	minor = acm_alloc_minor(acm);
	if (minor == ACM_TTY_MINORS) {
		dev_err(&intf->dev, "no more free acm devices\n");
		kfree(acm);
		return -ENODEV;
	}

	ctrlsize = usb_endpoint_maxp(epctrl);
	readsize = usb_endpoint_maxp(epread) *
				(quirks == SINGLE_RX_URB ? 1 : 2);
	acm->combined_interfaces = combined_interfaces;
	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
	acm->control = control_interface;
	acm->data = data_interface;
	acm->minor = minor;
	acm->dev = usb_dev;
	acm->ctrl_caps = ac_management_function;
	if (quirks & NO_CAP_LINE)
		acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
	acm->ctrlsize = ctrlsize;
	acm->readsize = readsize;
	acm->rx_buflimit = num_rx_buf;
	INIT_WORK(&acm->work, acm_softint);
	spin_lock_init(&acm->write_lock);
	spin_lock_init(&acm->read_lock);
	mutex_init(&acm->mutex);
	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
	acm->is_int_ep = usb_endpoint_xfer_int(epread);
	if (acm->is_int_ep)
		acm->bInterval = epread->bInterval;
	tty_port_init(&acm->port);
	acm->port.ops = &acm_port_ops;

	buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
	if (!buf) {
		dev_err(&intf->dev, "out of memory (ctrl buffer alloc)\n");
		goto alloc_fail2;
	}
	acm->ctrl_buffer = buf;

	if (acm_write_buffers_alloc(acm) < 0) {
		dev_err(&intf->dev, "out of memory (write buffer alloc)\n");
		goto alloc_fail4;
	}

	acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!acm->ctrlurb) {
		dev_err(&intf->dev, "out of memory (ctrlurb kmalloc)\n");
		goto alloc_fail5;
	}

	for (i = 0; i < num_rx_buf; i++) {
		struct acm_rb *rb = &(acm->read_buffers[i]);
		struct urb *urb;

		rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
								&rb->dma);
		if (!rb->base) {
			dev_err(&intf->dev, "out of memory "
					"(read bufs usb_alloc_coherent)\n");
			goto alloc_fail6;
		}
		rb->index = i;
		rb->instance = acm;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_err(&intf->dev,
				"out of memory (read urbs usb_alloc_urb)\n");
			goto alloc_fail6;
		}
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = rb->dma;
		if (acm->is_int_ep) {
			usb_fill_int_urb(urb, acm->dev,
					 acm->rx_endpoint,
					 rb->base,
					 acm->readsize,
					 acm_read_bulk_callback, rb,
					 acm->bInterval);
		} else {
			usb_fill_bulk_urb(urb, acm->dev,
					  acm->rx_endpoint,
					  rb->base,
					  acm->readsize,
					  acm_read_bulk_callback, rb);
		}

		acm->read_urbs[i] = urb;
		__set_bit(i, &acm->read_urbs_free);
	}
	for (i = 0; i < ACM_NW; i++) {
		struct acm_wb *snd = &(acm->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (snd->urb == NULL) {
			dev_err(&intf->dev,
				"out of memory (write urbs usb_alloc_urb)\n");
			goto alloc_fail7;
		}

		if (usb_endpoint_xfer_int(epwrite))
			usb_fill_int_urb(snd->urb, usb_dev,
				usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd,
				epwrite->bInterval);
		else
			usb_fill_bulk_urb(snd->urb, usb_dev,
				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
				NULL, acm->writesize, acm_write_bulk, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		snd->instance = acm;
	}

	usb_set_intfdata(intf, acm);

	i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
	if (i < 0)
		goto alloc_fail7;

	if (cfd) { /* export the country data */
		acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
		if (!acm->country_codes)
			goto skip_countries;
		acm->country_code_size = cfd->bLength - 4;
		memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
							cfd->bLength - 4);
		acm->country_rel_date = cfd->iCountryCodeRelDate;

		i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
		if (i < 0) {
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}

		i = device_create_file(&intf->dev,
						&dev_attr_iCountryCodeRelDate);
		if (i < 0) {
			device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
			kfree(acm->country_codes);
			acm->country_codes = NULL;
			acm->country_code_size = 0;
			goto skip_countries;
		}
	}

skip_countries:
	usb_fill_int_urb(acm->ctrlurb, usb_dev,
			 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
			 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
			 /* works around buggy devices */
			 epctrl->bInterval ? epctrl->bInterval : 0xff);
	acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	acm->ctrlurb->transfer_dma = acm->ctrl_dma;

	dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);

	acm_set_control(acm, acm->ctrlout);

	acm->line.dwDTERate = cpu_to_le32(9600);
	acm->line.bDataBits = 8;
	acm_set_line(acm, &acm->line);

	usb_driver_claim_interface(&acm_driver, data_interface, acm);
	usb_set_intfdata(data_interface, acm);

	usb_get_intf(control_interface);
	tty_register_device(acm_tty_driver, minor, &control_interface->dev);

	return 0;
alloc_fail7:
	for (i = 0; i < ACM_NW; i++)
		usb_free_urb(acm->wb[i].urb);
alloc_fail6:
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(acm->read_urbs[i]);
	acm_read_buffers_free(acm);
	usb_free_urb(acm->ctrlurb);
alloc_fail5:
	acm_write_buffers_free(acm);
alloc_fail4:
	usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
	acm_release_minor(acm);
	kfree(acm);
alloc_fail:
	return -ENOMEM;
}
/*
 * driver allocation handlers.
 */
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
	int retval = -ENOMEM;

	/*
	 * Allocate the driver data memory, if necessary.
	 */
	if (rt2x00dev->ops->drv_data_size > 0) {
		rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
			                      GFP_KERNEL);
		if (!rt2x00dev->drv_data) {
			retval = -ENOMEM;
			goto exit;
		}
	}

	spin_lock_init(&rt2x00dev->irqmask_lock);
	mutex_init(&rt2x00dev->csr_mutex);

	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Make room for rt2x00_intf inside the per-interface
	 * structure ieee80211_vif.
	 */
	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);

	/*
	 * Determine which operating modes are supported, all modes
	 * which require beaconing, depend on the availability of
	 * beacon entries.
	 */
	rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	if (rt2x00dev->ops->bcn->entry_num > 0)
		rt2x00dev->hw->wiphy->interface_modes |=
		    BIT(NL80211_IFTYPE_ADHOC) |
		    BIT(NL80211_IFTYPE_AP) |
		    BIT(NL80211_IFTYPE_MESH_POINT) |
		    BIT(NL80211_IFTYPE_WDS);

	rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	/*
	 * Initialize work.
	 */
	rt2x00dev->workqueue =
	    alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
	if (!rt2x00dev->workqueue) {
		retval = -ENOMEM;
		goto exit;
	}

	INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
	INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
	INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);

	/*
	 * Let the driver probe the device to detect the capabilities.
	 */
	retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to allocate device.\n");
		goto exit;
	}

	/*
	 * Allocate queue array.
	 */
	retval = rt2x00queue_allocate(rt2x00dev);
	if (retval)
		goto exit;

	/*
	 * Initialize ieee80211 structure.
	 */
	retval = rt2x00lib_probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to initialize hw.\n");
		goto exit;
	}

	/*
	 * Register extra components.
	 */
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);
	rt2x00rfkill_register(rt2x00dev);

	return 0;

exit:
	rt2x00lib_remove_dev(rt2x00dev);

	return retval;
}
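/*
 * max14688_probe - set up the headset detector: parse the platform data,
 * claim the detect/interrupt GPIOs, register the switch and input devices
 * and request the threaded IRQ.
 */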
static __devinit int max14688_probe (struct i2c_client *client, const struct i2c_device_id *id) { struct max14688_platform_data *pdata = client->dev.platform_data; struct max14688 *me; u8 chip_id, chip_rev; int i, rc; u8 pincontrol2 = 0; log_dbg(MAX14688_NAME" attached\n"); log_dbg("wake_lock_init\n"); wake_lock_init(&ear_key_wake_lock, WAKE_LOCK_SUSPEND, "ear_key"); me = kzalloc(sizeof(struct max14688), GFP_KERNEL); if (me == NULL) { log_err("Failed to allloate headset per device info\n"); return -ENOMEM; } if (client->dev.of_node) { pdata = devm_kzalloc(&client->dev, sizeof(struct max14688_platform_data), GFP_KERNEL); if (unlikely(!pdata)) { log_err("out of memory (%uB requested)\n", sizeof(struct max14688_platform_data)); return -ENOMEM; } client->dev.platform_data = pdata; max14688_parse_dt(&client->dev, pdata); } else { pdata = devm_kzalloc(&client->dev, sizeof(struct max14688_platform_data), GFP_KERNEL); if (unlikely(!pdata)) { log_err("out of memory (%uB requested)\n", sizeof(struct max14688_platform_data)); return -ENOMEM; } else { pdata = client->dev.platform_data; } } i2c_set_clientdata(client, me); spin_lock_init(&me->irq_lock); mutex_init(&me->lock); me->dev = &client->dev; me->kobj = &client->dev.kobj; me->irq = -1; me->gpio_int = pdata->gpio_int; me->gpio_detect = pdata->gpio_detect; INIT_DELAYED_WORK(&me->irq_work, max14688_irq_work); INIT_DELAYED_WORK(&me->det_work, max14688_det_work); #ifdef I2C_SUSPEND_WORKAROUND INIT_DELAYED_WORK(&me->check_suspended_work, max14688_check_suspended_worker); #endif rc = gpio_request(me->gpio_detect, MAX14688_NAME"-detect"); if (unlikely(rc)) { return rc; } rc = gpio_direction_input(me->gpio_detect); if (rc < 0) { log_err("Failed to configure gpio%d (me->gpio_detect) gpio_direction_input\n", me->gpio_detect); gpio_free(me->gpio_detect); return rc; } rc = gpio_request(me->gpio_int, MAX14688_NAME"-irq"); if (unlikely(rc)) { return rc; } rc = gpio_direction_input(me->gpio_int); if (rc < 0) { log_err("Failed to configure gpio%d (me->gpio_int) gpio_direction_input\n", me->gpio_int); gpio_free(me->gpio_int); return rc; } me->irq = gpio_to_irq(me->gpio_int); /* Save jack lookup table given via platform data */ me->jack_matches = pdata->jack_matches; me->num_of_jack_matches = pdata->num_of_jack_matches; /* Save button lookup table given via platform data */ me->button_matches = pdata->button_matches; me->num_of_button_matches = pdata->num_of_button_matches; me->matched_jack = -1; me->matched_button = -1; /* Platform-specific Calls */ me->detect_jack = pdata->detect_jack; me->read_mic_impedence = pdata->read_mic_impedence; me->read_left_impedence = pdata->read_left_impedence; me->report_jack = pdata->report_jack; me->report_button = pdata->report_button; /* Disable & Clear all interrupts */ max14688_write(me, MASK, 0x00); max14688_read(me, INTERRUPT, &me->irq_saved); /* INT AUTO disable(INT follows the state diagram and flow chart) */ max14688_read(me, PINCONTROL2, &pincontrol2); max14688_write(me, PINCONTROL2, ~PINCONTROL2_INTAUTO & pincontrol2); max14688_read(me, PINCONTROL2, &pincontrol2); log_dbg("%s[pincontrol2 = %d]\n", __func__, pincontrol2); /* Default MODE setting */ max14688_write_mode0(me, MAX14688_MODE_LOW); max14688_write_mode1(me, MAX14688_MODE_LOW); me->irq_saved = 0; me->irq_unmask = 0; log_dbg("%s[me->irq_saved = %d]\n", __func__, me->irq_saved); /* Register input_dev */ me->input_dev = input_allocate_device(); if (unlikely(!me->input_dev)) { log_err("failed to allocate memory for new input device\n"); rc = -ENOMEM; goto abort; } /* 
initialize switch device */ me->sdev.name = pdata->switch_name; rc = switch_dev_register(&me->sdev); if (rc < 0) { log_err("Failed to register switch device\n"); goto abort; } me->input_dev->name = DRIVER_NAME; me->input_dev->phys = DRIVER_NAME"/input0"; me->input_dev->dev.parent = me->dev; for (i = 0; i < me->num_of_jack_matches; i++) { if (likely(me->jack_matches[i].evt_type < EV_MAX)) { input_set_capability(me->input_dev, me->jack_matches[i].evt_type, me->jack_matches[i].evt_code1); if (likely(me->jack_matches[i].evt_code2)) input_set_capability(me->input_dev, me->jack_matches[i].evt_type, me->jack_matches[i].evt_code2); } } for (i = 0; i < me->num_of_button_matches; i++) { if (likely(me->button_matches[i].evt_type < EV_MAX)) { input_set_capability(me->input_dev, me->button_matches[i].evt_type, me->button_matches[i].evt_code); } } rc = input_register_device(me->input_dev); if (unlikely(rc)) { log_err("failed to register input device [%d]\n", rc); input_free_device(me->input_dev); me->input_dev = NULL; goto abort; } /* Create max14688 sysfs attributes */ me->attr_grp = &max14688_attr_group; rc = sysfs_create_group(me->kobj, me->attr_grp); if (unlikely(rc)) { log_err("failed to create attribute group [%d]\n", rc); me->attr_grp = NULL; goto abort; } /* Get MAX14688 IRQ */ if (unlikely(me->irq < 0)) { log_warn("interrupt disabled\n"); } else { /* Request system IRQ for MAX14688 */ rc = request_threaded_irq(me->irq, NULL, max14688_isr, IRQF_ONESHOT | IRQF_TRIGGER_FALLING, DRIVER_NAME, me); if (unlikely(rc < 0)) { log_err("failed to request IRQ(%u) [%d]\n", me->irq, rc); me->irq = -1; goto abort; } disable_irq((unsigned int)me->irq); } max14688_enable_irq(me, IRQ_DET); /* Complete initialization */ log_info(DRIVER_DESC" "DRIVER_VERSION" Installed\n"); chip_id = 0; chip_rev = 0; max14688_read_device_id(me, &chip_id, &chip_rev); log_info("chip id %02X rev %02X\n", chip_id, chip_rev); #ifdef CONFIG_EARJACK_DEBUGGER if (lge_get_board_revno() < HW_REV_1_0) { if (!(max14688_get_status(me, STATUS_INT) && max14688_get_status(me, STATUS_MICIN))) { log_info("not connecting earjack debugger\n"); msm_serial_set_uart_console(0); } } #endif if (me->detect_jack(me->dev)) { max14688_irq_jack_inserted(me); } return 0; abort: i2c_set_clientdata(client, NULL); max14688_destroy(me); return rc; }
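The probe above consumes a max14688_platform_data filled in either by max14688_parse_dt() or by board code. Below is a minimal, hypothetical board-file sketch of that structure; the field names (gpio_int, gpio_detect, switch_name, the jack/button match tables and the callbacks) come from the probe code, but the match-entry struct tag names and their exact layouts are assumptions inferred from how the input capabilities are registered.

#include <linux/input.h>

/* hypothetical board data for max14688_probe(); struct tag names are assumed */
static struct max14688_jack_match board_jack_matches[] = {
	/* 4-pole headset: evt_code2 is optional and only registered when non-zero */
	{ .evt_type = EV_SW, .evt_code1 = SW_HEADPHONE_INSERT, .evt_code2 = SW_MICROPHONE_INSERT },
	/* 3-pole headphone */
	{ .evt_type = EV_SW, .evt_code1 = SW_HEADPHONE_INSERT },
};

static struct max14688_button_match board_button_matches[] = {
	{ .evt_type = EV_KEY, .evt_code = KEY_MEDIA },
	{ .evt_type = EV_KEY, .evt_code = KEY_VOLUMEUP },
	{ .evt_type = EV_KEY, .evt_code = KEY_VOLUMEDOWN },
};

static struct max14688_platform_data board_pdata = {
	.gpio_int               = 11, /* placeholder GPIO numbers */
	.gpio_detect            = 12,
	.switch_name            = "h2w",
	.jack_matches           = board_jack_matches,
	.num_of_jack_matches    = ARRAY_SIZE(board_jack_matches),
	.button_matches         = board_button_matches,
	.num_of_button_matches  = ARRAY_SIZE(board_button_matches),
	.detect_jack            = board_detect_jack,   /* board callbacks, declared elsewhere */
	.report_jack            = board_report_jack,
	.report_button          = board_report_button,
};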
static int __devinit gpio_keys_setup_key(struct platform_device *pdev, struct input_dev *input, struct gpio_button_data *bdata, struct gpio_keys_button *button) { const char *desc = button->desc ? button->desc : "gpio_keys"; struct device *dev = &pdev->dev; irq_handler_t isr; unsigned long irqflags; int irq, error; bdata->input = input; bdata->button = button; spin_lock_init(&bdata->lock); if (gpio_is_valid(button->gpio)) { error = gpio_request(button->gpio, desc); if (error < 0) { dev_err(dev, "Failed to request GPIO %d, error %d\n", button->gpio, error); return error; } error = gpio_direction_input(button->gpio); if (error < 0) { dev_err(dev, "Failed to configure direction for GPIO %d, error %d\n", button->gpio, error); goto fail; } if (button->debounce_interval) { error = gpio_set_debounce(button->gpio, button->debounce_interval * 1000); /* use timer if gpiolib doesn't provide debounce */ if (error < 0) bdata->timer_debounce = button->debounce_interval; } irq = gpio_to_irq(button->gpio); if (irq < 0) { error = irq; dev_err(dev, "Unable to get irq number for GPIO %d, error %d\n", button->gpio, error); goto fail; } bdata->irq = irq; INIT_WORK(&bdata->work, gpio_keys_gpio_work_func); setup_timer(&bdata->timer, gpio_keys_gpio_timer, (unsigned long)bdata); isr = gpio_keys_gpio_isr; irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; } else { if (!button->irq) { dev_err(dev, "No IRQ specified\n"); return -EINVAL; } bdata->irq = button->irq; if (button->type && button->type != EV_KEY) { dev_err(dev, "Only EV_KEY allowed for IRQ buttons.\n"); return -EINVAL; } bdata->timer_debounce = button->debounce_interval; setup_timer(&bdata->timer, gpio_keys_irq_timer, (unsigned long)bdata); isr = gpio_keys_irq_isr; irqflags = 0; } input_set_capability(input, button->type ?: EV_KEY, button->code); /* * If platform has specified that the button can be disabled, * we don't want it to share the interrupt line. */ if (!button->can_disable) irqflags |= IRQF_SHARED; if (button->wakeup) irqflags |= IRQF_NO_SUSPEND; error = request_any_context_irq(bdata->irq, isr, irqflags, desc, bdata); if (error < 0) { dev_err(dev, "Unable to claim irq %d; error %d\n", bdata->irq, error); goto fail; } return 0; fail: if (gpio_is_valid(button->gpio)) gpio_free(button->gpio); return error; }
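gpio_keys_setup_key() handles two kinds of buttons: GPIO-backed ones (debounced, IRQ derived via gpio_to_irq()) and pure-IRQ ones (invalid gpio, button->irq set, EV_KEY only). A minimal, hypothetical platform definition exercising both paths might look like this; the field names match struct gpio_keys_button as used above, while the GPIO and IRQ numbers are placeholders.

#include <linux/gpio_keys.h>
#include <linux/input.h>

static struct gpio_keys_button example_buttons[] = {
	{	/* GPIO-backed key: the edge IRQ is taken from the GPIO line */
		.desc              = "volume up",
		.gpio              = 42,	/* placeholder line number */
		.active_low        = 1,
		.type              = EV_KEY,
		.code              = KEY_VOLUMEUP,
		.debounce_interval = 20,	/* ms; falls back to the timer if gpiolib can't debounce */
		.wakeup            = 1,		/* adds IRQF_NO_SUSPEND in the setup above */
	},
	{	/* IRQ-only key: gpio left invalid, so the irq branch is taken */
		.desc              = "lid switch",
		.gpio              = -1,
		.irq               = 99,	/* placeholder interrupt number */
		.type              = EV_KEY,	/* must be EV_KEY for IRQ buttons */
		.code              = KEY_POWER,
		.debounce_interval = 5,		/* handled entirely by the timer path */
	},
};

static struct gpio_keys_platform_data example_pdata = {
	.buttons  = example_buttons,
	.nbuttons = ARRAY_SIZE(example_buttons),
};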
/* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. */ static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace) { int retval; struct task_struct *p; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid namespace don't * allow it to share a thread group or signal handlers with the * forking task. */ if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) && (task_active_pid_ns(current) != current->nsproxy->pid_ns)) return ERR_PTR(-EINVAL); retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current); if (!p) goto fork_out; ftrace_graph_init_task(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->real_cred->user != INIT_USER) goto bad_fork_free; } current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
*/ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; if (!try_module_get(task_thread_info(p)->exec_domain->module)) goto bad_fork_cleanup_count; p->did_exec = 0; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; p->cpu_power = 0; #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE p->prev_cputime.utime = p->prev_cputime.stime = 0; #endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqlock_init(&p->vtime_seqlock); p->vtime_snap = 0; p->vtime_snap_whence = VTIME_SLEEPING; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); #endif p->default_timer_slack_ns = current->timer_slack_ns; task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); p->io_context = NULL; p->audit_context = NULL; if (clone_flags & CLONE_THREAD) threadgroup_change_begin(current); cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_cgroup; } mpol_fix_fork_child_flag(p); #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_MEMCG p->memcg_batch.do_batch = 0; p->memcg_batch.memcg = NULL; #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif /* Perform scheduler related setup. Assign this task to a CPU. 
*/ sched_fork(p); retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_audit; retval = copy_files(clone_flags, p); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread(clone_flags, stack_start, stack_size, p); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { retval = -ENOMEM; pid = alloc_pid(p->nsproxy->pid_ns); if (!pid) goto bad_fork_cleanup_io; } p->pid = pid_nr(pid); p->tgid = p->pid; if (clone_flags & CLONE_THREAD) p->tgid = current->tgid; p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_BLOCK p->plug = NULL; #endif #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif uprobe_copy_process(p); /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ if (clone_flags & CLONE_THREAD) p->exit_signal = -1; else if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; /* * Ok, make it visible to the rest of the system. * We don't wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(&current->sighand->siglock); /* * Copy seccomp details explicitly here, in case they were changed * before holding sighand lock. */ copy_seccomp(p); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * its process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). 
*/ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); if (thread_group_leader(p)) { if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->leader_pid = pid; p->signal->tty = tty_kref_get(current->signal->tty); attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); add_2_adj_tree(p); __this_cpu_inc(process_counts); } else { current->signal->nr_threads++; atomic_inc(&current->signal->live); atomic_inc(&current->signal->sigcnt); p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); #ifdef CONFIG_RKP_KDP if(rkp_cred_enable) rkp_assign_pgd(p); #endif/*CONFIG_RKP_KDP*/ return p; bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: #endif if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); cgroup_exit(p, 0); delayacct_tsk_free(p); module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); }
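Both versions of copy_process() here are large instances of the kernel's layered-goto unwind idiom: each successfully acquired resource adds a bad_fork_* label, and a later failure jumps to the deepest label that releases everything acquired so far, in reverse order. A self-contained sketch of the same shape, using hypothetical resources rather than kernel APIs:

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/* acquire three resources; on failure, unwind only what was acquired */
static int setup(struct res **a_out, struct res **b_out, struct res **c_out)
{
	struct res *a, *b, *c;
	int retval = -1;

	a = acquire();
	if (!a)
		goto out;		/* nothing to release yet */
	b = acquire();
	if (!b)
		goto free_a;		/* release a only */
	c = acquire();
	if (!c)
		goto free_b;		/* release b, then fall through to a */

	*a_out = a; *b_out = b; *c_out = c;
	return 0;

	/* cleanup labels run in reverse order of acquisition */
free_b:
	release(b);
free_a:
	release(a);
out:
	return retval;
}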
/* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. */ static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace) { int retval; struct task_struct *p; int cgroup_callbacks_done = 0; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); retval = security_task_create(clone_flags); if (retval) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current); if (!p) goto fork_out; tracehook_init_task(p); ftrace_graph_init_task(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->real_cred->user != INIT_USER) goto bad_fork_free; } retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
*/ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; if (!try_module_get(task_thread_info(p)->exec_domain->module)) goto bad_fork_cleanup_count; p->did_exec = 0; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = cputime_zero; p->stime = cputime_zero; p->gtime = cputime_zero; p->utimescaled = cputime_zero; p->stimescaled = cputime_zero; p->prev_utime = cputime_zero; p->prev_stime = cputime_zero; p->default_timer_slack_ns = current->timer_slack_ns; task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); p->io_context = NULL; p->audit_context = NULL; if (clone_flags & CLONE_THREAD) threadgroup_fork_read_lock(current); cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_cgroup; } mpol_fix_fork_child_flag(p); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW p->hardirqs_enabled = 1; #else p->hardirqs_enabled = 0; #endif p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR p->memcg_batch.do_batch = 0; p->memcg_batch.memcg = NULL; #endif /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; if ((retval = audit_alloc(p))) goto bad_fork_cleanup_policy; /* copy all the process information */ if ((retval = copy_semundo(clone_flags, p))) goto bad_fork_cleanup_audit; if ((retval = copy_files(clone_flags, p))) goto bad_fork_cleanup_semundo; if ((retval = copy_fs(clone_flags, p))) goto bad_fork_cleanup_files; if ((retval = copy_sighand(clone_flags, p))) goto bad_fork_cleanup_fs; if ((retval = copy_signal(clone_flags, p))) goto bad_fork_cleanup_sighand; if ((retval = copy_mm(clone_flags, p))) goto bad_fork_cleanup_signal; if ((retval = copy_namespaces(clone_flags, p))) goto bad_fork_cleanup_mm; if ((retval = copy_io(clone_flags, p))) goto bad_fork_cleanup_namespaces; retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); if (retval) goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { retval = -ENOMEM; pid = alloc_pid(p->nsproxy->pid_ns); if (!pid) goto bad_fork_cleanup_io; if (clone_flags & CLONE_NEWPID) { retval = pid_ns_prepare_proc(p->nsproxy->pid_ns); if (retval < 0) goto bad_fork_free_pid; } } p->pid = pid_nr(pid); p->tgid = p->pid; if (clone_flags & CLONE_THREAD) p->tgid = current->tgid; if (current->nsproxy != p->nsproxy) { retval = ns_cgroup_clone(p, pid); if (retval) goto bad_fork_free_pid; } p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? 
child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) p->sas_ss_sp = p->sas_ss_size = 0; /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); p->pdeath_signal = 0; p->exit_state = 0; /* * Ok, make it visible to the rest of the system. * We don't wake it up yet. */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); /* Now that the task is set up, run cgroup callbacks if * necessary. We need to run them before the task is visible * on the tasklist. */ cgroup_fork_callbacks(p); cgroup_callbacks_done = 1; /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } spin_lock(&current->sighand->siglock); /* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * its process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). 
*/ recalc_sigpending(); if (signal_pending(current)) { spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; goto bad_fork_free_pid; } if (clone_flags & CLONE_THREAD) { atomic_inc(&current->signal->count); atomic_inc(&current->signal->live); p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); } if (likely(p->pid)) { list_add_tail(&p->sibling, &p->real_parent->children); tracehook_finish_clone(p, clone_flags, trace); if (thread_group_leader(p)) { if (clone_flags & CLONE_NEWPID) p->nsproxy->pid_ns->child_reaper = p; p->signal->leader_pid = pid; tty_kref_put(p->signal->tty); p->signal->tty = tty_kref_get(current->signal->tty); attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); attach_pid(p, PIDTYPE_SID, task_session(current)); list_add_tail_rcu(&p->tasks, &init_task.tasks); __get_cpu_var(process_counts)++; } attach_pid(p, PIDTYPE_PID, pid); nr_threads++; } total_forks++; spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); if (clone_flags & CLONE_THREAD) threadgroup_fork_read_unlock(current); perf_event_fork(p); return p; bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) { task_lock(p); if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) atomic_dec(&p->mm->oom_disable_count); task_unlock(p); mmput(p->mm); } bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) __cleanup_signal(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_policy: perf_event_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_cgroup: #endif if (clone_flags & CLONE_THREAD) threadgroup_fork_read_unlock(current); cgroup_exit(p, cgroup_callbacks_done); delayacct_tsk_free(p); module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); exit_creds(p); bad_fork_free: free_task(p); fork_out: return ERR_PTR(retval); }
static int __devinit p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct p54p_priv *priv; struct ieee80211_hw *dev; unsigned long mem_addr, mem_len; int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); return err; } mem_addr = pci_resource_start(pdev, 0); mem_len = pci_resource_len(pdev, 0); if (mem_len < sizeof(struct p54p_csr)) { dev_err(&pdev->dev, "Too short PCI resources\n"); err = -ENXIO; goto err_disable_dev; } err = pci_request_regions(pdev, "p54pci"); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_dev; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "No suitable DMA available\n"); err = -ENODEV; goto err_free_reg; } pci_set_master(pdev); pci_try_set_mwi(pdev); pci_write_config_byte(pdev, 0x40, 0); pci_write_config_byte(pdev, 0x41, 0); dev = p54_init_common(sizeof(*priv)); if (!dev) { dev_err(&pdev->dev, "ieee80211 alloc failed\n"); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = ioremap(mem_addr, mem_len); if (!priv->map) { dev_err(&pdev->dev, "Cannot map device memory\n"); err = -ENOMEM; goto err_free_dev; } priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), &priv->ring_control_dma); if (!priv->ring_control) { dev_err(&pdev->dev, "Cannot allocate rings\n"); err = -ENOMEM; goto err_iounmap; } priv->common.open = p54p_open; priv->common.stop = p54p_stop; priv->common.tx = p54p_tx; spin_lock_init(&priv->lock); tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); err = request_firmware(&priv->firmware, "isl3886pci", &priv->pdev->dev); if (err) { dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); err = request_firmware(&priv->firmware, "isl3886", &priv->pdev->dev); if (err) goto err_free_common; } err = p54p_open(dev); if (err) goto err_free_common; err = p54_read_eeprom(dev); p54p_stop(dev); if (err) goto err_free_common; err = p54_register_common(dev, &pdev->dev); if (err) goto err_free_common; return 0; err_free_common: release_firmware(priv->firmware); pci_free_consistent(pdev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); err_iounmap: iounmap(priv->map); err_free_dev: pci_set_drvdata(pdev, NULL); p54_free_common(dev); err_free_reg: pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); return err; }
int nwpserial_register_port(struct uart_port *port) { struct nwpserial_port *up = NULL; int ret = -1; int i; static int first = 1; int dcr_len; int dcr_base; struct device_node *dn; mutex_lock(&nwpserial_mutex); dn = port->dev->of_node; if (dn == NULL) goto out; /* get dcr base. */ dcr_base = dcr_resource_start(dn, 0); /* find matching entry */ for (i = 0; i < NWPSERIAL_NR; i++) if (nwpserial_ports[i].port.iobase == dcr_base) { up = &nwpserial_ports[i]; break; } /* we didn't find a matching entry, search for a free port */ if (up == NULL) for (i = 0; i < NWPSERIAL_NR; i++) if (nwpserial_ports[i].port.type == PORT_UNKNOWN && nwpserial_ports[i].port.iobase == 0) { up = &nwpserial_ports[i]; break; } if (up == NULL) { ret = -EBUSY; goto out; } if (first) uart_register_driver(&nwpserial_reg); first = 0; up->port.membase = port->membase; up->port.irq = port->irq; up->port.uartclk = port->uartclk; up->port.fifosize = port->fifosize; up->port.regshift = port->regshift; up->port.iotype = port->iotype; up->port.flags = port->flags; up->port.mapbase = port->mapbase; up->port.private_data = port->private_data; if (port->dev) up->port.dev = port->dev; if (up->port.iobase != dcr_base) { up->port.ops = &nwpserial_pops; up->port.fifosize = 16; spin_lock_init(&up->port.lock); up->port.iobase = dcr_base; dcr_len = dcr_resource_len(dn, 0); up->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(up->dcr_host)) { printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL\n"); goto out; } } ret = uart_add_one_port(&nwpserial_reg, &up->port); if (ret == 0) ret = up->port.line; out: mutex_unlock(&nwpserial_mutex); return ret; }
/** * cypress_probe * * Called by the USB core when a new device is connected that it thinks * this driver might be interested in. A pointer to the struct usb_device_id * that the core used to make this decision is passed in along with the * struct usb_interface. If the driver claims the interface, it should * initialize the device properly and return 0; if it does not want to * claim the device, or an error occurs, it should return a negative * error value. */ int cypress_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_cypress *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; size_t buffer_size; int i, retval = -ENOMEM; /* See if the device offered us matches what we can accept */ if ((udev->descriptor.idVendor != BRL_USB_VENDOR_ID) || (udev->descriptor.idProduct != BRL_USB_PRODUCT_ID)) { return -ENODEV; } /* allocate memory for our device state and initialize it; probe() may sleep, so GFP_KERNEL is sufficient */ dev = kzalloc(sizeof(struct usb_cypress), GFP_KERNEL); if( dev == NULL ) { printk("cypress_probe: out of memory.\n"); return -ENOMEM; } dev->udev = udev; dev->interface = interface; /* Set up the endpoint information: use only the first bulk-in and bulk-out endpoints */ iface_desc = &interface->altsetting[0]; for( i = 0; i < iface_desc->desc.bNumEndpoints; ++i ) { endpoint = &iface_desc->endpoint[i].desc; if( !dev->bulk_in_endpointAddr && (endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK) ) { /* we found a bulk in endpoint */ buffer_size = endpoint->wMaxPacketSize; dev->bulk_in_size = buffer_size; dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; dev->read_urb = usb_alloc_urb(0, GFP_KERNEL); if( dev->read_urb == NULL ) { printk("No free urbs available\n"); goto error; } dev->read_urb->transfer_flags = (URB_NO_TRANSFER_DMA_MAP); dev->bulk_in_buffer = usb_alloc_coherent (udev, buffer_size, GFP_KERNEL, &dev->read_urb->transfer_dma); if( dev->bulk_in_buffer == NULL ) { printk("Couldn't allocate bulk_in_buffer\n"); goto error; } usb_fill_bulk_urb(dev->read_urb, udev, usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), dev->bulk_in_buffer, buffer_size, (usb_complete_t)cypress_read_bulk_callback, dev); } if( !dev->bulk_out_endpointAddr && !(endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK) ) { /* we found a bulk out endpoint */ /* a probe() may sleep and has no restrictions on memory allocations */ dev->write_urb = usb_alloc_urb(0, GFP_KERNEL); if( dev->write_urb == NULL ) { printk("No free urbs available\n"); goto error; } dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; /* on some platforms using this kind of buffer alloc * call eliminates a dma "bounce buffer". * * NOTE: you'd normally want i/o buffers that hold * more than one packet, so that i/o delays between * packets don't hurt throughput. 
*/ buffer_size = endpoint->wMaxPacketSize; dev->bulk_out_size = buffer_size; dev->write_urb->transfer_flags = (URB_NO_TRANSFER_DMA_MAP); dev->bulk_out_buffer = usb_alloc_coherent (udev, buffer_size, GFP_KERNEL, &dev->write_urb->transfer_dma); if( dev->bulk_out_buffer == NULL ) { printk("Couldn't allocate bulk_out_buffer\n"); goto error; } usb_fill_bulk_urb(dev->write_urb, udev, usb_sndbulkpipe(udev, endpoint->bEndpointAddress), dev->bulk_out_buffer, buffer_size, (usb_complete_t)cypress_write_bulk_callback, dev); } } if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) { printk("Couldn't find both bulk-in and bulk-out endpoints\n"); goto error; } dev->present = 1; /* allow device read, write and ioctl */ usb_set_intfdata (interface, dev); spin_lock_init(&(dev->lock)); /* initialize spinlock to unlocked (new kernel method) */ /* HK: Begin- connect filesystem hooks */ /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &cypress_class); if (retval) { /* something prevented us from registering this driver */ printk("Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev_info(&interface->dev, "BRL USB device now attached to minor: %d\n", interface->minor); /* let the user know the device minor */ dev->read_task = NULL; /* Initialize fs read_task. */ addNode(dev); return 0; error: // please please please remove goto statements! HK:Why? printk("cypress_probe: error occurred!\n"); cypress_delete (dev); return retval; }
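cypress_probe() classifies endpoints by masking bEndpointAddress and bmAttributes by hand. The kernel's usb_endpoint_* helpers (from <linux/usb/ch9.h>, available since around 2.6.19) express the same tests more readably; the fragment below is a sketch of how the loop's conditions could be rewritten with them, assuming the same dev/iface_desc/endpoint variables as in the function above.

/* equivalent endpoint tests using the usb_endpoint_* helpers;
 * behavior matches the manual direction/transfer-type masks above */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
	endpoint = &iface_desc->endpoint[i].desc;

	if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) {
		/* first bulk-in endpoint: set up the read URB as above */
	}

	if (!dev->bulk_out_endpointAddr && usb_endpoint_is_bulk_out(endpoint)) {
		/* first bulk-out endpoint: set up the write URB as above */
	}
}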
/*=========================================================================== METHOD: GobiUSBNetOpen (Public Method) DESCRIPTION: Wrapper to usbnet_open, correctly handling autosuspend Start AutoPM thread PARAMETERS pNet [ I ] - Pointer to net device RETURN VALUE: int - 0 for success Negative errno for error ===========================================================================*/ int GobiUSBNetOpen( struct net_device * pNet ) { int status = 0; struct sGobiUSBNet * pGobiDev; struct usbnet * pDev = netdev_priv( pNet ); if (pDev == NULL) { DBG( "failed to get usbnet device\n" ); return -ENXIO; } pGobiDev = (sGobiUSBNet *)pDev->data[0]; if (pGobiDev == NULL) { DBG( "failed to get QMIDevice\n" ); return -ENXIO; } DBG( "\n" ); // Start the AutoPM thread pGobiDev->mAutoPM.mpIntf = pGobiDev->mpIntf; pGobiDev->mAutoPM.mbExit = false; pGobiDev->mAutoPM.mpURBList = NULL; pGobiDev->mAutoPM.mpActiveURB = NULL; spin_lock_init( &pGobiDev->mAutoPM.mURBListLock ); spin_lock_init( &pGobiDev->mAutoPM.mActiveURBLock ); init_completion( &pGobiDev->mAutoPM.mThreadDoWork ); pGobiDev->mAutoPM.mpThread = kthread_run( GobiUSBNetAutoPMThread, &pGobiDev->mAutoPM, "GobiUSBNetAutoPMThread" ); if (IS_ERR( pGobiDev->mAutoPM.mpThread )) { DBG( "AutoPM thread creation error\n" ); return PTR_ERR( pGobiDev->mAutoPM.mpThread ); } // Allow traffic GobiClearDownReason( pGobiDev, NET_IFACE_STOPPED ); // Pass to usbnet_open if defined if (pGobiDev->mpUSBNetOpen != NULL) { status = pGobiDev->mpUSBNetOpen( pNet ); // If usbnet_open was successful enable Auto PM if (status == 0) { #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 )) usb_autopm_enable( pGobiDev->mpIntf ); #else usb_autopm_put_interface( pGobiDev->mpIntf ); #endif } } else { DBG( "no USBNetOpen defined\n" ); } return status; }
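GobiUSBNetOpen() drops an autosuspend reference with usb_autopm_put_interface() (usb_autopm_enable() on pre-2.6.33 kernels) once traffic is allowed. That put only balances if the driver took a matching reference earlier, presumably at probe/bind time in this driver. A sketch of the usual get/put pairing in this driver's style; the placement shown here is an assumption, not code from the driver:

// sketch: taking the reference that the put in GobiUSBNetOpen releases
status = usb_autopm_get_interface( pIntf );   // pins the device awake, resuming it if needed
if (status < 0)
{
   return status;
}

// ... setup that must not race with autosuspend ...

usb_autopm_put_interface( pIntf );   // re-enable autosuspend once it is safe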
/* open callback */ static int snd_bcm2835_playback_open(struct snd_pcm_substream *substream) { bcm2835_chip_t *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; bcm2835_alsa_stream_t *alsa_stream; int idx; int err; audio_info(" .. IN (%d)\n", substream->number); audio_info("Alsa open (%d)\n", substream->number); idx = substream->number; /* substreams are indexed 0..MAX_SUBSTREAMS-1 */ if (idx >= MAX_SUBSTREAMS) { audio_error ("substream(%d) device doesn't exist; max(%d) substreams allowed\n", idx, MAX_SUBSTREAMS); err = -ENODEV; goto out; } /* Check if we are ready */ if (!(chip->avail_substreams & (1 << idx))) { /* We are not ready yet */ audio_error("substream(%d) device is not ready yet\n", idx); err = -EAGAIN; goto out; } alsa_stream = kzalloc(sizeof(bcm2835_alsa_stream_t), GFP_KERNEL); if (alsa_stream == NULL) { return -ENOMEM; } /* Initialise alsa_stream */ alsa_stream->chip = chip; alsa_stream->substream = substream; alsa_stream->idx = idx; chip->alsa_stream[idx] = alsa_stream; sema_init(&alsa_stream->buffers_update_sem, 0); sema_init(&alsa_stream->control_sem, 0); spin_lock_init(&alsa_stream->lock); /* Enabled in start trigger, called on each "fifo irq" after that */ alsa_stream->enable_fifo_irq = 0; alsa_stream->fifo_irq_handler = bcm2835_playback_fifo_irq; runtime->private_data = alsa_stream; runtime->private_free = snd_bcm2835_playback_free; runtime->hw = snd_bcm2835_playback_hw; /* minimum 16 bytes alignment (for vchiq bulk transfers) */ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16); err = bcm2835_audio_open(alsa_stream); if (err != 0) { kfree(alsa_stream); return err; } alsa_stream->open = 1; alsa_stream->draining = 1; out: audio_info(" .. OUT =%d\n", err); return err; }
static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; struct nfp_flower_priv *app_priv; u64 version, features; int err; if (!pf->eth_tbl) { nfp_warn(app->cpp, "FlowerNIC requires eth table\n"); return -EINVAL; } if (!pf->mac_stats_bar) { nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n"); return -EINVAL; } if (!pf->vf_cfg_bar) { nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n"); return -EINVAL; } version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err); if (err) { nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n"); return err; } /* We need to ensure hardware has enough flower capabilities. */ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); return -EINVAL; } app_priv = vzalloc(sizeof(struct nfp_flower_priv)); if (!app_priv) return -ENOMEM; app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); skb_queue_head_init(&app_priv->cmsg_skbs_low); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); init_waitqueue_head(&app_priv->reify_wait_queue); init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); err = nfp_flower_metadata_init(app); if (err) goto err_free_app_priv; /* Extract the extra features supported by the firmware. */ features = nfp_rtsym_read_le(app->pf->rtbl, "_abi_flower_extra_features", &err); if (err) app_priv->flower_ext_feats = 0; else app_priv->flower_ext_feats = features; /* Tell the firmware that the driver supports lag. */ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_balance_sync_enable", 1); if (!err) { app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; nfp_flower_lag_init(&app_priv->nfp_lag); } else if (err == -ENOENT) { nfp_warn(app->cpp, "LAG not supported by FW.\n"); } else { goto err_cleanup_metadata; } return 0; err_cleanup_metadata: nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); return err; }
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_wwan_intf_private *data; struct usb_host_interface *intf = serial->interface->cur_altsetting; int retval = -ENODEV; __u8 nintf; __u8 ifnum; bool is_gobi1k = id->driver_info ? true : false; dbg("%s", __func__); dbg("Is Gobi 1000 = %d", is_gobi1k); nintf = serial->dev->actconfig->desc.bNumInterfaces; dbg("Num Interfaces = %d", nintf); ifnum = intf->desc.bInterfaceNumber; dbg("This Interface = %d", ifnum); data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; spin_lock_init(&data->susp_lock); switch (nintf) { case 1: /* QDL mode */ /* Gobi 2000 has a single altsetting, older ones have two */ if (serial->interface->num_altsetting == 2) intf = &serial->interface->altsetting[1]; else if (serial->interface->num_altsetting > 2) break; if (intf->desc.bNumEndpoints == 2 && usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) && usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { dbg("QDL port found"); if (serial->interface->num_altsetting == 1) { retval = 0; /* Success */ break; } retval = usb_set_interface(serial->dev, ifnum, 1); if (retval < 0) { dev_err(&serial->dev->dev, "Could not set interface, error %d\n", retval); retval = -ENODEV; } } break; case 3: case 4: /* Composite mode; don't bind to the QMI/net interface as that * gets handled by other drivers. */ /* Gobi 1K USB layout: * 0: serial port (doesn't respond) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net * * Gobi 2K+ USB layout: * 0: QMI/net * 1: DM/DIAG (use libqcdm from ModemManager for communication) * 2: AT-capable modem port * 3: NMEA */ if (ifnum == 1 && !is_gobi1k) { dbg("Gobi 2K+ DM/DIAG interface found"); retval = usb_set_interface(serial->dev, ifnum, 0); if (retval < 0) { dev_err(&serial->dev->dev, "Could not set interface, error %d\n", retval); retval = -ENODEV; } } else if (ifnum == 2) { dbg("Modem port found"); retval = usb_set_interface(serial->dev, ifnum, 0); if (retval < 0) { dev_err(&serial->dev->dev, "Could not set interface, error %d\n", retval); retval = -ENODEV; } } else if (ifnum == 3 && !is_gobi1k) { /* * NMEA (serial line 9600 8N1) * # echo "\$GPS_START" > /dev/ttyUSBx * # echo "\$GPS_STOP" > /dev/ttyUSBx */ dbg("Gobi 2K+ NMEA GPS interface found"); retval = usb_set_interface(serial->dev, ifnum, 0); if (retval < 0) { dev_err(&serial->dev->dev, "Could not set interface, error %d\n", retval); retval = -ENODEV; } } break; case 9: if (ifnum != EFS_SYNC_IFC_NUM) break; retval = 0; break; default: dev_err(&serial->dev->dev, "unknown number of interfaces: %d\n", nintf); retval = -ENODEV; } /* Attach the private data on success; on every -ENODEV path free it * exactly once here, which also covers the branches that previously * leaked it (and avoids the double-free a per-branch kfree invites) */ if (retval != -ENODEV) usb_set_serial_data(serial, data); else kfree(data); return retval; }
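qcprobe() distinguishes the Gobi 1K from the Gobi 2K+ USB layout purely through id->driver_info. A sketch of how the match table could encode that, modeled on the upstream qcserial convention of a wrapper macro that sets driver_info to 1 for Gobi 1K entries; the VID/PID values below are placeholders, not a real device list:

/* hypothetical IDs; a real table lists the actual VID/PID pairs */
#define DEVICE_G1K(v, p) \
	USB_DEVICE(v, p), .driver_info = 1

static const struct usb_device_id id_table[] = {
	{ DEVICE_G1K(0x05c6, 0x9211) },	/* Gobi 1K: matched as is_gobi1k above */
	{ USB_DEVICE(0x05c6, 0x9205) },	/* Gobi 2K+: driver_info left at 0 */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);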