/*
 * wm97xx_bat_probe() - register the WM97xx battery as a power supply.
 *
 * Builds the list of supported power-supply properties from the
 * board-specific platform data (set earlier via wm97xx_bat_set_pdata),
 * optionally claims the charger-status GPIO, and registers the global
 * "bat_ps" power_supply with the core, scheduling an initial status
 * update on success.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fixes vs. previous revision:
 *  - on kzalloc() failure the function jumped to err2 with ret still 0,
 *    reporting success to the driver core; it now returns -ENOMEM.
 *  - the property array was filled using the bare "charge_gpio >= 0"
 *    test while the array was sized using "&& gpio_is_valid()"; with a
 *    non-negative but invalid GPIO this overflowed prop[] by one slot.
 *    Both sites now use the same condition.
 *  - err2 called gpio_free() unconditionally, even when the charge
 *    GPIO had never been requested.
 */
static int __devinit wm97xx_bat_probe(struct platform_device *dev)
{
	int ret = 0;
	int props = 1;	/* POWER_SUPPLY_PROP_PRESENT */
	int i = 0;

	/* Only a single instance of this device is supported. */
	if (dev->id != -1)
		return -EINVAL;

	mutex_init(&work_lock);

	if (!pdata) {
		dev_err(&dev->dev, "Please use wm97xx_bat_set_pdata\n");
		return -EINVAL;
	}

	if (pdata->charge_gpio >= 0 && gpio_is_valid(pdata->charge_gpio)) {
		ret = gpio_request(pdata->charge_gpio, "BATT CHRG");
		if (ret)
			goto err;
		ret = gpio_direction_input(pdata->charge_gpio);
		if (ret)
			goto err2;
		props++;	/* POWER_SUPPLY_PROP_STATUS */
	}

	/* Each optional platform hook/ADC channel adds one property. */
	if (pdata->batt_tech >= 0)
		props++;	/* POWER_SUPPLY_PROP_TECHNOLOGY */
	if (pdata->temp_aux >= 0)
		props++;	/* POWER_SUPPLY_PROP_TEMP */
	if (pdata->batt_aux >= 0)
		props++;	/* POWER_SUPPLY_PROP_VOLTAGE_NOW */
	if (pdata->max_voltage >= 0)
		props++;	/* POWER_SUPPLY_PROP_VOLTAGE_MAX */
	if (pdata->min_voltage >= 0)
		props++;	/* POWER_SUPPLY_PROP_VOLTAGE_MIN */

	prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
	if (!prop) {
		ret = -ENOMEM;	/* was: fell through with ret == 0 */
		goto err2;
	}

	/* Fill the array with exactly the properties counted above. */
	prop[i++] = POWER_SUPPLY_PROP_PRESENT;
	if (pdata->charge_gpio >= 0 && gpio_is_valid(pdata->charge_gpio))
		prop[i++] = POWER_SUPPLY_PROP_STATUS;
	if (pdata->batt_tech >= 0)
		prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
	if (pdata->temp_aux >= 0)
		prop[i++] = POWER_SUPPLY_PROP_TEMP;
	if (pdata->batt_aux >= 0)
		prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW;
	if (pdata->max_voltage >= 0)
		prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX;
	if (pdata->min_voltage >= 0)
		prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN;

	INIT_WORK(&bat_work, wm97xx_bat_work);

	if (!pdata->batt_name) {
		dev_info(&dev->dev, "Please consider setting proper battery "
				"name in platform definition file, falling "
				"back to name \"wm97xx-batt\"\n");
		bat_ps.name = "wm97xx-batt";
	} else
		bat_ps.name = pdata->batt_name;

	bat_ps.properties = prop;
	bat_ps.num_properties = props;

	ret = power_supply_register(&dev->dev, &bat_ps);
	if (!ret)
		schedule_work(&bat_work);
	else
		goto err3;

	return 0;

err3:
	kfree(prop);
err2:
	/* Only release the GPIO if it was actually requested above. */
	if (pdata->charge_gpio >= 0 && gpio_is_valid(pdata->charge_gpio))
		gpio_free(pdata->charge_gpio);
err:
	return ret;
}
/*
 * initarm() - FreeBSD machine-dependent early boot for a StrongARM
 * SA-1110 board (the statically mapped devices come from
 * assabet_devmap, and the UART virtual address is SACOM1_VBASE).
 *
 * Called with the MMU still using the bootstrap mappings.  It:
 *  - parses the boot parameters and consoles up (parse_boot_param,
 *    cninit), hardcoding memsize to 32 MB and boothowto to
 *    RB_VERBOSE | RB_SINGLE;
 *  - carves early allocations (via the local valloc_pages/alloc_pages
 *    macros, which bump "freemempos" and zero the area) for the L1
 *    page table, the md(4) root L2 table, the SA-110 cache-clean area,
 *    the kernel L2 page tables, the vector page, the dynamic per-CPU
 *    area, and the IRQ/ABT/UND/kernel stacks;
 *  - builds the kernel L1 table: links the L2 tables (vectors at VA 0,
 *    kernel at KERNBASE, I/O at 0xd0000000, IRQ at 0x90000000, the
 *    md root at MDROOT_ADDR, and KERNEL_PT_VMDATA_NUM 1 MB VM slots),
 *    then maps the kernel image, stacks, page tables, DPCPU pages,
 *    vector page and static devmap;
 *  - switches to the new table (setttb + TLB flush), re-cleans the
 *    cache, sets the per-mode stack pointers and abort handlers,
 *    enables MMU/caches via cpufunc_control(0x337f, 0x107d), installs
 *    the low vectors, seeds phys_avail/dump_avail, and bootstraps
 *    pmap and proc0.
 *
 * Returns the initial SVC-mode stack pointer for the kernel:
 * kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb).
 *
 * NOTE(review): freemempos is assumed to start inside the 256 KB
 * region zeroed just after physical_freestart is rounded up — confirm
 * against the boot loader's memory layout.
 */
void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; struct pv_addr md_addr; struct pv_addr md_bla; struct pv_addr dpcpu; int loop; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t lastalloced; vm_offset_t lastaddr; uint32_t memsize = 32 * 1024 * 1024; sa1110_uart_vaddr = SACOM1_VBASE; boothowto = RB_VERBOSE | RB_SINGLE; /* Default value */ lastaddr = parse_boot_param(abp); cninit(); set_cpufuncs(); physmem = memsize / PAGE_SIZE; pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); physical_start = (vm_offset_t) KERNBASE; physical_end = lastaddr; physical_freestart = (((vm_offset_t)physical_end) + PAGE_MASK) & ~PAGE_MASK; md_addr.pv_va = md_addr.pv_pa = MDROOT_ADDR; freemempos = (vm_offset_t)round_page(physical_freestart); memset((void *)freemempos, 0, 256*1024); /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \
alloc_pages((var).pv_pa, (np)); \
(var).pv_va = (var).pv_pa; #define alloc_pages(var, np) \
(var) = freemempos; \
freemempos += ((np) * PAGE_SIZE);\
memset((char *)(var), 0, ((np) * PAGE_SIZE)); while ((freemempos & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); valloc_pages(md_bla, L2_TABLE_SIZE / PAGE_SIZE); alloc_pages(sa1_cache_clean_addr, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[loop], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[loop].pv_pa = freemempos + (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa; } } /* * Allocate a page for the system page mapped to V0x00000000 * This page will just contain the system vectors and can be * shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. 
*/ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE); valloc_pages(abtstack, ABT_STACK_SIZE); valloc_pages(undstack, UND_STACK_SIZE); valloc_pages(kernelstack, KSTACK_PAGES); lastalloced = kernelstack.pv_va; /* * Allocate memory for the l1 and l2 page tables. The scheme to avoid * wasting memory by allocating the l1pt on the first 16k memory was * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for * this to work (which is supposed to be the case). */ /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_pa; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, 0x00000000, &kernel_pt_table[KERNEL_PT_SYS]); pmap_link_l2pt(l1pagetable, KERNBASE, &kernel_pt_table[KERNEL_PT_KERNEL]); pmap_link_l2pt(l1pagetable, 0xd0000000, &kernel_pt_table[KERNEL_PT_IO]); pmap_link_l2pt(l1pagetable, lastalloced & ~((L1_S_SIZE * 4) - 1), &kernel_pt_table[KERNEL_PT_L1]); pmap_link_l2pt(l1pagetable, 0x90000000, &kernel_pt_table[KERNEL_PT_IRQ]); pmap_link_l2pt(l1pagetable, MDROOT_ADDR, &md_bla); for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00100000, &kernel_pt_table[KERNEL_PT_VMDATA + loop]); pmap_map_chunk(l1pagetable, KERNBASE, KERNBASE, ((uint32_t)lastaddr - KERNBASE), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the DPCPU pages */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, DPCPU_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the stack pages */ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa, IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, md_addr.pv_va, md_addr.pv_pa, MD_ROOT_SIZE * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, 
abtstack.pv_va, abtstack.pv_pa, ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa, UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa, KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va, kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); } pmap_map_chunk(l1pagetable, md_bla.pv_va, md_bla.pv_pa, L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map the vector page. */ pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the statically mapped devices. */ arm_devmap_bootstrap(l1pagetable, assabet_devmap); pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xf0000000, CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); data_abort_handler_address = (u_int)data_abort_handler; prefetch_abort_handler_address = (u_int)prefetch_abort_handler; undefined_handler_address = (u_int)undefinedinstruction_bounce; undefined_init(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. 
This will have happened in setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); bootverbose = 1; /* Set stack for exception handlers */ init_proc0(kernelstack.pv_va); /* Enable MMU, I-cache, D-cache, write buffer. */ cpufunc_control(0x337f, 0x107d); arm_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL); pmap_curmaxkvaddr = freemempos + KERNEL_PT_VMDATA_NUM * 0x400000; dump_avail[0] = phys_avail[0] = round_page(virtual_avail); dump_avail[1] = phys_avail[1] = 0xc0000000 + 0x02000000 - 1; dump_avail[2] = phys_avail[2] = 0; dump_avail[3] = phys_avail[3] = 0; mutex_init(); vm_max_kernel_address = 0xd0000000; pmap_bootstrap(freemempos, &kernel_l1pt); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); }
/*
 * bcm_sf2_sw_setup() - DSA setup hook for the Broadcom Starfighter 2.
 *
 * Walks the parent device_node to ioremap all BCM_SF2_REGS_NUM register
 * windows (stored consecutively starting at priv->core), software-resets
 * the switch, requests both switch interrupts, pulses RST_MIB_CNT in
 * CORE_GMNCFGCFG to clear the MIB counters, derives the port count from
 * CORE_IMP0_PRT_ID (clamped to DSA_MAX_PORTS) and the GPHY count from
 * the optional "brcm,num-gphy" property (default 1), then configures
 * each port: regular setup for enabled ports, IMP setup for the CPU
 * port, and disable for the rest.  On 7445D0 the pseudo-PHY and PHY 0
 * are kept in ds->phys_mii_mask so reads are diverted to the local
 * workaround; otherwise the mask is cleared so the "mdio-unimac"
 * master bus is used.  Finally the top/core/gphy revisions are read
 * and logged.
 *
 * Returns 0 on success or a negative errno; on failure the requested
 * IRQ(s) are released and all mapped windows are iounmap()ed.
 *
 * NOTE(review): the mappings created by irq_of_parse_and_map() are not
 * disposed with irq_dispose_mapping() on the error paths — confirm
 * whether that leak matters on this platform.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct device_node *dn; void __iomem **base; unsigned int port; unsigned int i; u32 reg, rev; int ret; spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); /* All the interesting properties are at the parent device_node * level */ dn = ds->cd->of_node->parent; bcm_sf2_identify_ports(priv, ds->cd->of_node); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); ret = -ENOMEM; goto out_unmap; } base++; } ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); goto out_unmap; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_unmap; } ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); goto out_free_irq0; } /* Reset the MIB counters */ reg = core_readl(priv, CORE_GMNCFGCFG); reg |= RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); reg &= ~RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); /* Get the maximum number of ports for this switch */ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; if (priv->hw_params.num_ports > DSA_MAX_PORTS) priv->hw_params.num_ports = DSA_MAX_PORTS; /* Assume a single GPHY setup if we can't read that property */ if (of_property_read_u32(dn, "brcm,num-gphy", &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; /* Enable all valid ports and disable those unused */ for (port = 0; port < priv->hw_params.num_ports; port++) { /* IMP port receives special treatment */ if ((1 << 
port) & ds->enabled_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); else bcm_sf2_port_disable(ds, port, NULL); } /* Include the pseudo-PHY address and the broadcast PHY address to * divert reads towards our workaround. This is only required for * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such * that we can use the regular SWITCH_MDIO master controller instead. * * By default, DSA initializes ds->phys_mii_mask to * ds->enabled_port_mask to have a 1:1 mapping between Port address * and PHY address in order to utilize the slave_mii_bus instance to * read from Port PHYs. This is not what we want here, so we * initialize phys_mii_mask 0 to always utilize the "master" MDIO * bus backed by the "mdio-unimac" driver. */ if (of_machine_is_compatible("brcm,bcm7445d0")) ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); else ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; priv->hw_params.core_rev = (rev & SF2_REV_MASK); rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, priv->core, priv->irq0, priv->irq1); return 0; out_free_irq0: free_irq(priv->irq0, priv); out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { if (*base) iounmap(*base); base++; } return ret; }
static int max732x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max732x_platform_data *pdata; struct max732x_chip *chip; struct i2c_client *c; uint16_t addr_a, addr_b; int ret, nr_port; pdata = dev_get_platdata(&client->dev); if (pdata == NULL) { dev_dbg(&client->dev, "no platform data\n"); return -EINVAL; } chip = devm_kzalloc(&client->dev, sizeof(struct max732x_chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->client = client; nr_port = max732x_setup_gpio(chip, id, pdata->gpio_base); addr_a = (client->addr & 0x0f) | 0x60; addr_b = (client->addr & 0x0f) | 0x50; switch (client->addr & 0x70) { case 0x60: chip->client_group_a = client; if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_b); chip->client_group_b = chip->client_dummy = c; } break; case 0x50: chip->client_group_b = client; if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_a); chip->client_group_a = chip->client_dummy = c; } break; default: dev_err(&client->dev, "invalid I2C address specified %02x\n", client->addr); ret = -EINVAL; goto out_failed; } mutex_init(&chip->lock); max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]); if (nr_port > 8) max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]); ret = max732x_irq_setup(chip, id); if (ret) goto out_failed; ret = gpiochip_add(&chip->gpio_chip); if (ret) goto out_failed; if (pdata->setup) { ret = pdata->setup(client, chip->gpio_chip.base, chip->gpio_chip.ngpio, pdata->context); if (ret < 0) dev_warn(&client->dev, "setup failed, %d\n", ret); } i2c_set_clientdata(client, chip); return 0; out_failed: max732x_irq_teardown(chip); return ret; }
static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl) { int32_t rc = 0; CDBG("Enter"); /* Validate input parameters */ /* Allocate memory for sensor_i2c_client */ s_ctrl->sensor_i2c_client = kzalloc(sizeof(*s_ctrl->sensor_i2c_client), GFP_KERNEL); if (!s_ctrl->sensor_i2c_client) { pr_err("failed: no memory sensor_i2c_client %p", s_ctrl->sensor_i2c_client); return -ENOMEM; } /* Allocate memory for mutex */ s_ctrl->msm_sensor_mutex = kzalloc(sizeof(*s_ctrl->msm_sensor_mutex), GFP_KERNEL); if (!s_ctrl->msm_sensor_mutex) { pr_err("failed: no memory msm_sensor_mutex %p", s_ctrl->msm_sensor_mutex); goto FREE_SENSOR_I2C_CLIENT; } /* Parse dt information and store in sensor control structure */ rc = msm_sensor_driver_get_dt_data(s_ctrl); if (rc < 0) { pr_err("failed: rc %d", rc); goto FREE_MUTEX; } /* Initialize mutex */ mutex_init(s_ctrl->msm_sensor_mutex); /* Initilize v4l2 subdev info */ s_ctrl->sensor_v4l2_subdev_info = msm_sensor_driver_subdev_info; s_ctrl->sensor_v4l2_subdev_info_size = ARRAY_SIZE(msm_sensor_driver_subdev_info); /* Initialize default parameters */ rc = msm_sensor_init_default_params(s_ctrl); if (rc < 0) { pr_err("failed: msm_sensor_init_default_params rc %d", rc); goto FREE_DT_DATA; } /* Store sensor control structure in static database */ g_sctrl[s_ctrl->id] = s_ctrl; pr_err("g_sctrl[%d] %p", s_ctrl->id, g_sctrl[s_ctrl->id]); return rc; FREE_DT_DATA: kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info); kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl); kfree(s_ctrl->sensordata->power_info.gpio_conf); kfree(s_ctrl->sensordata->power_info.cam_vreg); kfree(s_ctrl->sensordata); FREE_MUTEX: kfree(s_ctrl->msm_sensor_mutex); FREE_SENSOR_I2C_CLIENT: kfree(s_ctrl->sensor_i2c_client); return rc; }
/*
 * adp8870_probe() - I2C probe for the ADP8870 backlight controller.
 *
 * Verifies SMBus byte-data support and the manufacturer ID read from
 * ADP8870_MFDVID, allocates and initialises the driver state,
 * registers the backlight device, optionally exposes the ambient
 * light sensor sysfs group, and performs the initial hardware setup
 * plus a first brightness update.  LED outputs are probed when the
 * platform data declares any.
 *
 * Returns 0 on success or a negative errno; on failure the backlight
 * device, sysfs group and driver state are torn down in reverse order.
 *
 * Fix vs. previous revision: the sysfs_create_group() result was
 * checked outside the "if (pdata->en_ambl_sens)" guard, so a stale
 * positive value left in "ret" by adp8870_read() could abort the
 * probe even when the group was never requested.  The check is now
 * nested inside the guard.
 */
static int __devinit adp8870_probe(struct i2c_client *client,
				   const struct i2c_device_id *id)
{
	struct backlight_properties props;
	struct backlight_device *bl;
	struct adp8870_bl *data;
	struct adp8870_backlight_platform_data *pdata =
		client->dev.platform_data;
	uint8_t reg_val;
	int ret;

	if (!i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
		return -EIO;
	}

	if (!pdata) {
		dev_err(&client->dev, "no platform data?\n");
		return -EINVAL;
	}

	/* Verify that we are really talking to an ADP8870. */
	ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
	if (ret < 0)
		return -EIO;

	if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
		dev_err(&client->dev, "failed to probe\n");
		return -ENODEV;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->revid = ADP8870_DEVID(reg_val);
	data->client = client;
	data->pdata = pdata;
	data->id = id->driver_data;
	data->current_brightness = 0;
	i2c_set_clientdata(client, data);

	mutex_init(&data->lock);

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;

	bl = backlight_device_register(dev_driver_string(&client->dev),
			&client->dev, data, &adp8870_bl_ops, &props);
	if (IS_ERR(bl)) {
		dev_err(&client->dev, "failed to register backlight\n");
		ret = PTR_ERR(bl);
		goto out2;
	}

	data->bl = bl;

	if (pdata->en_ambl_sens) {
		ret = sysfs_create_group(&bl->dev.kobj,
			&adp8870_bl_attr_group);
		if (ret) {
			dev_err(&client->dev, "failed to register sysfs\n");
			goto out1;
		}
	}

	ret = adp8870_bl_setup(bl);
	if (ret) {
		ret = -EIO;
		goto out;
	}

	backlight_update_status(bl);

	dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);

	if (pdata->num_leds)
		adp8870_led_probe(client);

	return 0;

out:
	if (data->pdata->en_ambl_sens)
		sysfs_remove_group(&data->bl->dev.kobj,
			&adp8870_bl_attr_group);
out1:
	backlight_device_unregister(bl);
out2:
	i2c_set_clientdata(client, NULL);
	kfree(data);

	return ret;
}
/*
 * cyttsp4_mt_probe() - probe the Cypress TrueTouch multi-touch device.
 *
 * Allocates the per-device multi-touch state, wires up the function
 * pointer table and report lock, allocates (but does not yet register)
 * an input device, and enables runtime PM.  If the core already has
 * system information available (cyttsp4_request_sysinfo()), the input
 * device is configured and registered immediately via
 * cyttsp4_setup_input_device(); otherwise registration is deferred by
 * subscribing cyttsp4_setup_input_attention() to the CY_ATTEN_STARTUP
 * event.  An early-suspend hook is installed unconditionally (the
 * CONFIG_HAS_EARLYSUSPEND guard around it is commented out).
 *
 * Returns 0 on success or a negative errno; on failure the input
 * device, runtime PM state and allocated memory are released in
 * reverse order.
 *
 * NOTE(review): input_allocate_device() failure is reported as
 * -ENOSYS; -ENOMEM would be the conventional code — confirm nothing
 * keys off this value before changing it.
 */
static int cyttsp4_mt_probe(struct cyttsp4_device *ttsp) { struct device *dev = &ttsp->dev; struct cyttsp4_mt_data *md; struct cyttsp4_mt_platform_data *pdata = dev_get_platdata(dev); int rc = 0; dev_dbg(dev, "%s\n", __func__); dev_dbg(dev, "%s: debug on\n", __func__); dev_vdbg(dev, "%s: verbose debug on\n", __func__); if (pdata == NULL) { dev_err(dev, "%s: Missing platform data\n", __func__); rc = -ENODEV; goto error_no_pdata; } md = kzalloc(sizeof(*md), GFP_KERNEL); if (md == NULL) { dev_err(dev, "%s: Error, kzalloc\n", __func__); rc = -ENOMEM; goto error_alloc_data_failed; } cyttsp4_init_function_ptrs(md); mutex_init(&md->report_lock); md->prv_tch_type = CY_OBJ_STANDARD_FINGER; md->ttsp = ttsp; md->pdata = pdata; dev_set_drvdata(dev, md); /* Create the input device and register it. */ dev_vdbg(dev, "%s: Create the input device and register it\n", __func__); md->input = input_allocate_device(); if (md->input == NULL) { dev_err(dev, "%s: Error, failed to allocate input device\n", __func__); rc = -ENOSYS; goto error_alloc_failed; } md->input->name = ttsp->name; scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev)); md->input->phys = md->phys; md->input->dev.parent = &md->ttsp->dev; md->input->open = cyttsp4_mt_open; md->input->close = cyttsp4_mt_close; input_set_drvdata(md->input, md); pm_runtime_enable(dev); /* get sysinfo */ md->si = cyttsp4_request_sysinfo(ttsp); if (md->si) { rc = cyttsp4_setup_input_device(ttsp); if (rc) goto error_init_input; } else { dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n", __func__, md->si); cyttsp4_subscribe_attention(ttsp, CY_ATTEN_STARTUP, cyttsp4_setup_input_attention, 0); } //#ifdef CONFIG_HAS_EARLYSUSPEND cyttsp4_setup_early_suspend(md); //#endif dev_dbg(dev, "%s: OK\n", __func__); return 0; error_init_input: pm_runtime_suspend(dev); pm_runtime_disable(dev); input_free_device(md->input); error_alloc_failed: dev_set_drvdata(dev, NULL); kfree(md); error_alloc_data_failed: error_no_pdata: dev_err(dev, "%s 
failed.\n", __func__); return rc; }
static int htc_35mm_probe(struct platform_device *pdev) { int ret; pd = pdev->dev.platform_data; pr_info("H2W: htc_35mm_jack driver register\n"); hi = kzalloc(sizeof(struct h35_info), GFP_KERNEL); if (!hi) return -ENOMEM; hi->ext_35mm_status = 0; hi->is_ext_insert = 0; hi->mic_bias_state = 0; mutex_init(&hi->mutex_lock); wake_lock_init(&hi->headset_wake_lock, WAKE_LOCK_SUSPEND, "headset"); hi->hs_change.name = "h2w"; hi->hs_change.print_name = h35mm_print_name; ret = switch_dev_register(&hi->hs_change); if (ret < 0) goto err_switch_dev_register; detect_wq = create_workqueue("detection"); if (detect_wq == NULL) { ret = -ENOMEM; goto err_create_detect_work_queue; } button_wq = create_workqueue("button"); if (button_wq == NULL) { ret = -ENOMEM; goto err_create_button_work_queue; } hi->input = input_allocate_device(); if (!hi->input) { ret = -ENOMEM; goto err_request_input_dev; } hi->input->name = "h2w headset"; set_bit(EV_SYN, hi->input->evbit); set_bit(EV_KEY, hi->input->evbit); set_bit(KEY_MEDIA, hi->input->keybit); set_bit(KEY_NEXTSONG, hi->input->keybit); set_bit(KEY_PLAYPAUSE, hi->input->keybit); set_bit(KEY_PREVIOUSSONG, hi->input->keybit); set_bit(KEY_MUTE, hi->input->keybit); set_bit(KEY_VOLUMEUP, hi->input->keybit); set_bit(KEY_VOLUMEDOWN, hi->input->keybit); set_bit(KEY_END, hi->input->keybit); set_bit(KEY_SEND, hi->input->keybit); ret = input_register_device(hi->input); if (ret < 0) goto err_register_input_dev; /* Enable plug events*/ if (pd->plug_event_enable == NULL) { ret = -ENOMEM; goto err_enable_plug_event; } if (pd->plug_event_enable() != 1) { ret = -ENOMEM; goto err_enable_plug_event; } return 0; err_enable_plug_event: err_register_input_dev: input_free_device(hi->input); err_request_input_dev: destroy_workqueue(button_wq); err_create_button_work_queue: destroy_workqueue(detect_wq); err_create_detect_work_queue: switch_dev_unregister(&hi->hs_change); err_switch_dev_register: kzfree(hi); pr_err("H2W: Failed to register driver\n"); return ret; }
struct c2port_device *c2port_device_register(char *name, struct c2port_ops *ops, void *devdata) { struct c2port_device *c2dev; int id, ret; if (unlikely(!ops) || unlikely(!ops->access) || \ unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \ unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set)) return ERR_PTR(-EINVAL); c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); kmemcheck_annotate_bitfield(c2dev, flags); if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); ret = idr_pre_get(&c2port_idr, GFP_KERNEL); if (!ret) { ret = -ENOMEM; goto error_idr_get_new; } spin_lock_irq(&c2port_idr_lock); ret = idr_get_new(&c2port_idr, c2dev, &id); spin_unlock_irq(&c2port_idr_lock); if (ret < 0) goto error_idr_get_new; c2dev->id = id; c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, "c2port%d", id); if (unlikely(IS_ERR(c2dev->dev))) { ret = PTR_ERR(c2dev->dev); goto error_device_create; } dev_set_drvdata(c2dev->dev, c2dev); strncpy(c2dev->name, name, C2PORT_NAME_LEN); c2dev->ops = ops; mutex_init(&c2dev->mutex); /* Create binary file */ c2port_bin_attrs.size = ops->blocks_num * ops->block_size; ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs); if (unlikely(ret)) goto error_device_create_bin_file; /* By default C2 port access is off */ c2dev->access = c2dev->flash_access = 0; ops->access(c2dev, 0); dev_info(c2dev->dev, "C2 port %s added\n", name); dev_info(c2dev->dev, "%s flash has %d blocks x %d bytes " "(%d bytes total)\n", name, ops->blocks_num, ops->block_size, ops->blocks_num * ops->block_size); return c2dev; error_device_create_bin_file: device_destroy(c2port_class, 0); error_device_create: spin_lock_irq(&c2port_idr_lock); idr_remove(&c2port_idr, id); spin_unlock_irq(&c2port_idr_lock); error_idr_get_new: kfree(c2dev); return ERR_PTR(ret); }
/*
 * dmu_objset_open_impl() - construct an in-core objset_t for a dataset.
 * @spa: pool the objset lives in.
 * @ds:  owning dataset, or NULL for the meta-objset (MOS); when
 *       non-NULL the caller must hold ds->ds_opening_lock.
 * @bp:  root block pointer of the objset.
 * @osp: out parameter receiving the new objset on success.
 *
 * When *bp is not a hole, the objset phys block is read through the
 * ARC (honouring the L2ARC cache/compress flags); a checksum error is
 * converted to EIO.  On pools at SPA_VERSION_USERSPACE or later an
 * undersized phys buffer is re-allocated at the full objset_phys_t
 * size.  When *bp is a hole a fresh zeroed phys buffer is created
 * (old, smaller size on pre-USERSPACE pools).
 *
 * For a real dataset, dsl property callbacks are registered so later
 * property changes are reflected here: the primary/secondary cache
 * props always, and checksum/compression/copies/dedup/logbias/sync
 * only for non-snapshots (snapshots are immutable, so those do not
 * apply).  For the meta-objset (ds == NULL), fixed values are used
 * instead (fletcher4 checksum, LZJB compression, max replication,
 * caching enabled).  Any registration error unwinds the buffer and
 * the objset and is returned.
 *
 * Finally the ZIL is allocated, the per-txg dirty/free dnode lists
 * and dnode/dbuf lists are created, the special meta/userused/
 * groupused dnodes are opened (the latter two only when the phys
 * buffer is full-size), and the objset is linked into
 * ds->ds_objset under ds_lock.
 *
 * Returns 0 and sets *osp on success; returns a positive errno
 * (SET_ERROR style) on failure.
 *
 * NOTE(review): the "else if (ds == NULL)" following "if (ds)" is
 * redundant — the else branch already implies ds == NULL.
 */
int dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, objset_t **osp) { objset_t *os; int i, err; ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock)); os = kmem_zalloc(sizeof (objset_t), KM_PUSHPAGE); os->os_dsl_dataset = ds; os->os_spa = spa; os->os_rootbp = bp; if (!BP_IS_HOLE(os->os_rootbp)) { uint32_t aflags = ARC_WAIT; zbookmark_t zb; SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); if (DMU_OS_IS_L2CACHEABLE(os)) aflags |= ARC_L2CACHE; if (DMU_OS_IS_L2COMPRESSIBLE(os)) aflags |= ARC_L2COMPRESS; dprintf_bp(os->os_rootbp, "reading %s", ""); err = arc_read(NULL, spa, os->os_rootbp, arc_getbuf_func, &os->os_phys_buf, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb); if (err != 0) { kmem_free(os, sizeof (objset_t)); /* convert checksum errors into IO errors */ if (err == ECKSUM) err = SET_ERROR(EIO); return (err); } /* Increase the blocksize if we are permitted. */ if (spa_version(spa) >= SPA_VERSION_USERSPACE && arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) { arc_buf_t *buf = arc_buf_alloc(spa, sizeof (objset_phys_t), &os->os_phys_buf, ARC_BUFC_METADATA); bzero(buf->b_data, sizeof (objset_phys_t)); bcopy(os->os_phys_buf->b_data, buf->b_data, arc_buf_size(os->os_phys_buf)); (void) arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf); os->os_phys_buf = buf; } os->os_phys = os->os_phys_buf->b_data; os->os_flags = os->os_phys->os_flags; } else { int size = spa_version(spa) >= SPA_VERSION_USERSPACE ? sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE; os->os_phys_buf = arc_buf_alloc(spa, size, &os->os_phys_buf, ARC_BUFC_METADATA); os->os_phys = os->os_phys_buf->b_data; bzero(os->os_phys, size); } /* * Note: the changed_cb will be called once before the register * func returns, thus changing the checksum/compression from the * default (fletcher2/off). Snapshots don't need to know about * checksum/compression/copies. 
*/ if (ds) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE), primary_cache_changed_cb, os); if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE), secondary_cache_changed_cb, os); } if (!dsl_dataset_is_snapshot(ds)) { if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COMPRESSION), compression_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COPIES), copies_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DEDUP), dedup_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_LOGBIAS), logbias_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SYNC), sync_changed_cb, os); } } if (err != 0) { VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf)); kmem_free(os, sizeof (objset_t)); return (err); } } else if (ds == NULL) { /* It's the meta-objset. 
*/ os->os_checksum = ZIO_CHECKSUM_FLETCHER_4; os->os_compress = ZIO_COMPRESS_LZJB; os->os_copies = spa_max_replication(spa); os->os_dedup_checksum = ZIO_CHECKSUM_OFF; os->os_dedup_verify = 0; os->os_logbias = 0; os->os_sync = 0; os->os_primary_cache = ZFS_CACHE_ALL; os->os_secondary_cache = ZFS_CACHE_ALL; } if (ds == NULL || !dsl_dataset_is_snapshot(ds)) os->os_zil_header = os->os_phys->os_zil_header; os->os_zil = zil_alloc(os, &os->os_zil_header); for (i = 0; i < TXG_SIZE; i++) { list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[i])); list_create(&os->os_free_dnodes[i], sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[i])); } list_create(&os->os_dnodes, sizeof (dnode_t), offsetof(dnode_t, dn_link)); list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t), offsetof(dmu_buf_impl_t, db_link)); mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL); DMU_META_DNODE(os) = dnode_special_open(os, &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT, &os->os_meta_dnode); if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) { DMU_USERUSED_DNODE(os) = dnode_special_open(os, &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT, &os->os_userused_dnode); DMU_GROUPUSED_DNODE(os) = dnode_special_open(os, &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode); } /* * We should be the only thread trying to do this because we * have ds_opening_lock */ if (ds) { mutex_enter(&ds->ds_lock); ASSERT(ds->ds_objset == NULL); ds->ds_objset = os; mutex_exit(&ds->ds_lock); } *osp = os; return (0); }
/*
 * rmidev_init_device() - create the raw-access character device for an
 * RMI device.
 *
 * Reserves a chrdev region (reusing a previously obtained major when
 * rmidev_major_num is already set, otherwise letting the kernel pick
 * one via alloc_chrdev_region and caching it), allocates the
 * rmidev_data bookkeeping structure, initialises its file mutex,
 * registers the cdev with rmidev_fops, and finally creates the class
 * device node named CHAR_DEVICE_NAME<minor>.
 *
 * Returns 0 on success or a negative errno; on cdev/device failure
 * rmidev_device_cleanup(data) tears down the partial state, and on
 * allocation failure the chrdev region is released directly.
 *
 * NOTE(review): the allocation-failure path releases the region with
 * __unregister_chrdev(rmidev_major_num, MINOR(dev_no), 1, ...);
 * the matching call for a region obtained via
 * register/alloc_chrdev_region would normally be
 * unregister_chrdev_region(dev_no, 1) — confirm the intent.
 * NOTE(review): when the cached-major branch is taken, a failure
 * leaves rmidev_major_num set from a previous call; presumably
 * harmless, but verify re-entry behaviour.
 */
static int rmidev_init_device(struct rmi_char_device *cd) { struct rmi_device *rmi_dev = cd->rmi_dev; struct rmidev_data *data; dev_t dev_no; int retval; struct device *device_ptr; if (rmidev_major_num) { dev_no = MKDEV(rmidev_major_num, cd->rmi_dev->number); retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME); } else { retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME); /* let kernel allocate a major for us */ rmidev_major_num = MAJOR(dev_no); printk( "DTUCH : Device(%s) major number of rmidev(%d)\n", dev_name( &rmi_dev->dev ), rmidev_major_num ); } if (retval < 0) { printk( "DTUCH : Device(%s) failed to get minor dev number%d(%d)\n", dev_name( &rmi_dev->dev ), cd->rmi_dev->number, retval ); return retval; } else printk( "DTUCH : Device(%s) allocated rmidev %d %d\n", dev_name( &rmi_dev->dev ), MAJOR( dev_no ), MINOR( dev_no ) ); data = kzalloc(sizeof(struct rmidev_data), GFP_KERNEL); if (!data) { printk( "DTUCH : Device(%s) failed to allocate rmidev_data\n", dev_name( &rmi_dev->dev ) ); /* unregister the char device region */ __unregister_chrdev(rmidev_major_num, MINOR(dev_no), 1, CHAR_DEVICE_NAME); return -ENOMEM; } mutex_init(&data->file_mutex); data->rmi_dev = cd->rmi_dev; cd->data = data; cdev_init(&data->main_dev, &rmidev_fops); retval = cdev_add(&data->main_dev, dev_no, 1); if (retval) { printk( "DTUCH : Device(%s) error %d adding rmi_char_dev\n", dev_name( &cd->rmi_dev->dev ), retval ); rmidev_device_cleanup(data); return retval; } dev_set_name(&cd->dev, "rmidev%d", MINOR(dev_no)); data->device_class = rmidev_device_class; device_ptr = device_create( data->device_class, NULL, dev_no, NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no)); if (IS_ERR(device_ptr)) { printk( "DTUCH : Device(%s) failed to create rmi device\n", dev_name( &cd->rmi_dev->dev ) ); rmidev_device_cleanup(data); return -ENODEV; } return 0; }
/** Public interfaces **/

/*
 * lv24020lp_init() - one-time driver initialisation.
 *
 * Sets up the mutex that serialises access to the tuner state.
 */
void lv24020lp_init(void)
{
	mutex_init(&tuner_mtx);
}
/*
 * gpio_keys_probe() - platform probe for GPIO-connected keys/switches.
 *
 * Obtains platform data (falling back to device-tree parsing into
 * alt_pdata), allocates the driver state with one gpio_button_data
 * slot per button, sets up the input device (including the optional
 * flip-cover switch when CONFIG_SENSORS_HALL is set), configures each
 * button GPIO/IRQ, exposes the keys and "sec_key" sysfs groups,
 * registers the input device, reports the initial button state, and
 * arms the optional fake-power-off and auto power on/off test timers.
 *
 * Returns 0 on success or a negative errno; everything acquired is
 * released in reverse order on failure.
 *
 * Fixes vs. previous revision:
 *  - device_create() failure for "sec_key" was only logged, yet
 *    ddata->sec_key->kobj was then dereferenced unconditionally for
 *    sysfs_create_group() — dereferencing an ERR_PTR.  The group is
 *    now created (and removed in the error path) only when sec_key is
 *    valid, and a failed group creation no longer tries to remove the
 *    group it just failed to create.
 *  - removed a stray double semicolon after gpio_to_irq().
 */
static int __devinit gpio_keys_probe(struct platform_device *pdev)
{
	const struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
	struct gpio_keys_drvdata *ddata;
	struct device *dev = &pdev->dev;
	struct gpio_keys_platform_data alt_pdata;
	struct input_dev *input;
	int i, error;
	int wakeup = 0;

	if (!pdata) {
		error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
		if (error)
			return error;
		pdata = &alt_pdata;
	}

	/* One trailing gpio_button_data slot per button. */
	ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
			pdata->nbuttons * sizeof(struct gpio_button_data),
			GFP_KERNEL);
	input = input_allocate_device();
	if (!ddata || !input) {
		dev_err(dev, "failed to allocate state\n");
		error = -ENOMEM;
		goto fail1;
	}

	ddata->input = input;
	ddata->n_buttons = pdata->nbuttons;
	ddata->enable = pdata->enable;
	ddata->disable = pdata->disable;
#ifdef CONFIG_SENSORS_HALL
	ddata->gpio_flip_cover = pdata->gpio_flip_cover;
	ddata->irq_flip_cover = gpio_to_irq(ddata->gpio_flip_cover);
	wake_lock_init(&ddata->flip_wake_lock, WAKE_LOCK_SUSPEND,
			"flip wake lock");
#endif
	mutex_init(&ddata->disable_lock);

	platform_set_drvdata(pdev, ddata);
	input_set_drvdata(input, ddata);

	input->name = pdata->name ? : pdev->name;
	input->phys = "gpio-keys/input0";
	input->dev.parent = &pdev->dev;
#ifdef CONFIG_SENSORS_HALL
	input->evbit[0] |= BIT_MASK(EV_SW);
	input_set_capability(input, EV_SW, SW_FLIP);
#endif
	input->open = gpio_keys_open;
	input->close = gpio_keys_close;

	input->id.bustype = BUS_HOST;
	input->id.vendor = 0x0001;
	input->id.product = 0x0001;
	input->id.version = 0x0100;

	/* Enable auto repeat feature of Linux input subsystem */
	if (pdata->rep)
		__set_bit(EV_REP, input->evbit);

	for (i = 0; i < pdata->nbuttons; i++) {
		struct gpio_keys_button *button = &pdata->buttons[i];
		struct gpio_button_data *bdata = &ddata->data[i];

		error = gpio_keys_setup_key(pdev, input, bdata, button);
		if (error)
			goto fail2;

		if (button->wakeup)
			wakeup = 1;
#ifdef KEY_BOOSTER
		if (button->code == KEY_HOMEPAGE) {
			error = gpio_key_init_dvfs(bdata);
			if (error < 0) {
				dev_err(dev, "Fail get dvfs level for touch booster\n");
				goto fail2;
			}
		}
#endif
	}

	error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
	if (error) {
		dev_err(dev, "Unable to export keys/switches, error: %d\n",
			error);
		goto fail2;
	}

	ddata->sec_key = device_create(sec_class, NULL, 0, ddata, "sec_key");
	if (IS_ERR(ddata->sec_key)) {
		dev_err(dev, "Failed to create sec_key device\n");
	} else {
		/* Only touch sec_key's kobject when the device exists. */
		error = sysfs_create_group(&ddata->sec_key->kobj,
				&sec_key_attr_group);
		if (error) {
			dev_err(dev, "Failed to create the test sysfs: %d\n",
				error);
			goto fail_attr_group;
		}
	}

#ifdef CONFIG_SENSORS_HALL
	init_hall_ic_irq(input);
#endif

	error = input_register_device(input);
	if (error) {
		dev_err(dev, "Unable to register input device, error: %d\n",
			error);
		goto fail3;
	}

	/* get current state of buttons */
	for (i = 0; i < pdata->nbuttons; i++)
		gpio_keys_report_event(&ddata->data[i]);
	input_sync(input);

#ifdef CONFIG_FAST_BOOT
	/* Fake power off */
	input_set_capability(input, EV_KEY, KEY_FAKE_PWR);
	setup_timer(&fake_timer, gpio_keys_fake_off_check,
			(unsigned long)input);
	wake_lock_init(&fake_lock, WAKE_LOCK_SUSPEND, "fake_lock");
#endif

	device_init_wakeup(&pdev->dev, wakeup);

#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
#if defined(CONFIG_N1A) || defined(CONFIG_N2A)
	if (set_auto_power_on_off_powerkey_val) {
		init_timer(&poweroff_keypad_timer);
		poweroff_keypad_timer.function = poweroff_keypad_timer_handler;
		poweroff_keypad_timer.data = (unsigned long)&ddata->data[0];
		if (lpcharge)
			poweroff_keypad_timer.expires = jiffies + 20*HZ;
		else
			poweroff_keypad_timer.expires = jiffies + 40*HZ;
		add_timer(&poweroff_keypad_timer);
		printk("AUTO_POWER_ON_OFF_FLAG Test Start !!!\n");
	}
#endif
#endif

	return 0;

fail3:
	if (!IS_ERR(ddata->sec_key))
		sysfs_remove_group(&ddata->sec_key->kobj,
				&sec_key_attr_group);
fail_attr_group:
	sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
fail2:
	while (--i >= 0)
		gpio_remove_key(&ddata->data[i]);

	platform_set_drvdata(pdev, NULL);
#ifdef CONFIG_SENSORS_HALL
	wake_lock_destroy(&ddata->flip_wake_lock);
#endif
fail1:
	input_free_device(input);
	kfree(ddata);
	/* If we have no platform_data, we allocated buttons dynamically. */
	if (!pdev->dev.platform_data)
		kfree(pdata->buttons);

	return error;
}
int cma3000_init(struct cma3000_accl_data *data) { int ret = 0, fuzz_x, fuzz_y, fuzz_z, g_range; uint32_t irqflags; uint8_t ctrl; INIT_DELAYED_WORK(&data->input_work, cma3000_input_work_func); if (data->client->dev.platform_data == NULL) { dev_err(&data->client->dev, "platform data not found\n"); goto err_op2_failed; } memcpy(&(data->pdata), data->client->dev.platform_data, sizeof(struct cma3000_platform_data)); ret = cma3000_reset(data); if (ret) goto err_op2_failed; ret = cma3000_read(data, CMA3000_REVID, "Revid"); if (ret < 0) goto err_op2_failed; pr_info("CMA3000 Acclerometer : Revision %x\n", ret); /* Bring it out of default power down state */ ret = cma3000_poweron(data); if (ret < 0) goto err_op2_failed; data->req_poll_rate = data->pdata.def_poll_rate; fuzz_x = data->pdata.fuzz_x; fuzz_y = data->pdata.fuzz_y; fuzz_z = data->pdata.fuzz_z; g_range = data->pdata.g_range; irqflags = data->pdata.irqflags; data->input_dev = input_allocate_device(); if (data->input_dev == NULL) { ret = -ENOMEM; dev_err(&data->client->dev, "Failed to allocate input device\n"); goto err_op2_failed; } data->input_dev->name = "cma3000-acclerometer"; #ifdef CONFIG_INPUT_CMA3000_I2C data->input_dev->id.bustype = BUS_I2C; #endif __set_bit(EV_ABS, data->input_dev->evbit); __set_bit(EV_MSC, data->input_dev->evbit); input_set_abs_params(data->input_dev, ABS_X, -g_range, g_range, fuzz_x, 0); input_set_abs_params(data->input_dev, ABS_Y, -g_range, g_range, fuzz_y, 0); input_set_abs_params(data->input_dev, ABS_Z, -g_range, g_range, fuzz_z, 0); input_set_abs_params(data->input_dev, ABS_MISC, 0, 1, 0, 0); ret = input_register_device(data->input_dev); if (ret) { dev_err(&data->client->dev, "Unable to register input device\n"); goto err_op2_failed; } mutex_init(&data->mutex); if (data->client->irq) { ret = request_irq(data->client->irq, cma3000_isr, irqflags | IRQF_ONESHOT, data->client->name, data); if (ret < 0) { dev_err(&data->client->dev, "request_threaded_irq failed\n"); goto err_op1_failed; } 
} else { /*There is no IRQ set, disable IRQ on CMA*/ ctrl = cma3000_read(data, CMA3000_CTRL, "Status"); ctrl |= 0x1; cma3000_set(data, CMA3000_CTRL, ctrl, "Disable IRQ"); } ret = sysfs_create_group(&data->client->dev.kobj, &cma3000_attr_group); if (ret) { dev_err(&data->client->dev, "failed to create sysfs entries\n"); goto err_op1_failed; } cma3000_set_mode(data, CMAMODE_POFF); return 0; err_op1_failed: mutex_destroy(&data->mutex); input_unregister_device(data->input_dev); err_op2_failed: if (data != NULL) { if (data->input_dev != NULL) input_free_device(data->input_dev); } return ret; }
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_host_interface *interface = intf->cur_altsetting; struct usb_endpoint_descriptor *endpoint; struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; struct input_dev *input_dev; int error = -ENOMEM; char rep_data[2], limit = 0; struct hid_descriptor *hid_desc; wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL); input_dev = input_allocate_device(); if (!wacom || !input_dev || !wacom_wac) goto fail1; wacom_wac->data = usb_buffer_alloc(dev, 10, GFP_KERNEL, &wacom->data_dma); if (!wacom_wac->data) goto fail1; wacom->irq = usb_alloc_urb(0, GFP_KERNEL); if (!wacom->irq) goto fail2; wacom->usbdev = dev; wacom->dev = input_dev; wacom->intf = intf; mutex_init(&wacom->lock); usb_make_path(dev, wacom->phys, sizeof(wacom->phys)); strlcat(wacom->phys, "/input0", sizeof(wacom->phys)); wacom_wac->features = features = get_wacom_feature(id); BUG_ON(features->pktlen > 10); input_dev->name = wacom_wac->features->name; wacom->wacom_wac = wacom_wac; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, wacom); input_dev->open = wacom_open; input_dev->close = wacom_close; endpoint = &intf->cur_altsetting->endpoint[0].desc; /* TabletPC need to retrieve the physical and logical maximum from report descriptor */ if (wacom_wac->features->type == TABLETPC) { if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) { if (usb_get_extra_descriptor(&interface->endpoint[0], HID_DEVICET_REPORT, &hid_desc)) { printk("wacom: can not retrive extra class descriptor\n"); goto fail2; } } error = wacom_parse_hid(intf, hid_desc, wacom_wac); if (error) goto fail2; } input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH) | 
BIT_MASK(BTN_STYLUS); input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0); input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0); if (features->type == TABLETPC) { input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP); input_set_abs_params(input_dev, ABS_RX, 0, features->touch_x_max, 4, 0); input_set_abs_params(input_dev, ABS_RY, 0, features->touch_y_max, 4, 0); } input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC); wacom_init_input_dev(input_dev, wacom_wac); usb_fill_int_urb(wacom->irq, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), wacom_wac->data, wacom_wac->features->pktlen, wacom_sys_irq, wacom, endpoint->bInterval); wacom->irq->transfer_dma = wacom->data_dma; wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; error = input_register_device(wacom->dev); if (error) goto fail3; /* * Ask the tablet to report tablet data if it is not a Tablet PC. * Repeat until it succeeds */ if (wacom_wac->features->type != TABLETPC) { do { rep_data[0] = 2; rep_data[1] = 2; error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, 2, rep_data, 2); if (error >= 0) error = usb_get_report(intf, WAC_HID_FEATURE_REPORT, 2, rep_data, 2); } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); } usb_set_intfdata(intf, wacom); return 0; fail3: usb_free_urb(wacom->irq); fail2: usb_buffer_free(dev, 10, wacom_wac->data, wacom->data_dma); fail1: input_free_device(input_dev); kfree(wacom); kfree(wacom_wac); return error; }
int rmnet_usb_ctrl_probe(struct usb_interface *intf, struct usb_host_endpoint *int_in, unsigned long rmnet_devnum, unsigned long *data) { struct rmnet_ctrl_udev *cudev; struct rmnet_ctrl_dev *dev = NULL; u16 wMaxPacketSize; struct usb_endpoint_descriptor *ep; struct usb_device *udev = interface_to_usbdev(intf); int interval; int ret = 0, n; /* Find next available ctrl_dev */ for (n = 0; n < insts_per_dev; n++) { dev = &ctrl_devs[rmnet_devnum][n]; if (!dev->claimed) break; } if (!dev || n == insts_per_dev) { pr_err("%s: No available ctrl devices for %lu\n", __func__, rmnet_devnum); return -ENODEV; } cudev = dev->cudev; cudev->int_pipe = usb_rcvintpipe(udev, int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); cudev->intf = intf; cudev->inturb = usb_alloc_urb(0, GFP_KERNEL); if (!cudev->inturb) { dev_err(&intf->dev, "Error allocating int urb\n"); kfree(cudev); return -ENOMEM; } /*use max pkt size from ep desc*/ ep = &cudev->intf->cur_altsetting->endpoint[0].desc; wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize); cudev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL); if (!cudev->intbuf) { usb_free_urb(cudev->inturb); kfree(cudev); dev_err(&intf->dev, "Error allocating int buffer\n"); return -ENOMEM; } cudev->in_ctlreq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); cudev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; cudev->in_ctlreq->wValue = 0; cudev->in_ctlreq->wIndex = cudev->intf->cur_altsetting->desc.bInterfaceNumber; cudev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH); interval = int_in->desc.bInterval; usb_fill_int_urb(cudev->inturb, udev, cudev->int_pipe, cudev->intbuf, wMaxPacketSize, notification_available_cb, cudev, interval); usb_mark_last_busy(udev); mutex_init(&cudev->udev_lock); ret = rmnet_usb_ctrl_start_rx(cudev); if (ret) { usb_free_urb(cudev->inturb); kfree(cudev->intbuf); kfree(cudev); return ret; } *data = (unsigned long)cudev; /* If MUX is enabled, wakeup the open process here */ if 
(test_bit(RMNET_CTRL_DEV_MUX_EN, &cudev->status)) { set_bit(RMNET_CTRL_DEV_READY, &cudev->status); for (n = 0; n < insts_per_dev; n++) { dev = &ctrl_devs[rmnet_devnum][n]; wake_up(&dev->open_wait_queue); } } else { cudev->ctrldev_id = n; dev->claimed = true; } return 0; }
/*!	Brings up the networking stack: domains, interfaces, timers,
	notifications (non-fatal if unavailable), the socket module, the
	chain locks and the family/chain hash tables, then scans the
	protocol and datalink-protocol add-on directories and registers
	the built-in datalink protocols for AF_INET.

	On failure everything acquired so far is torn down in reverse
	order.  Fix vs. previous revision: the module reference taken via
	get_module(NET_SOCKET_MODULE_NAME) was never released on the
	later error paths; put_module() is now part of the unwind.
*/
status_t
init_stack()
{
	status_t status = init_domains();
	if (status != B_OK)
		return status;

	status = init_interfaces();
	if (status != B_OK)
		goto err1;

	status = init_timers();
	if (status != B_OK)
		goto err2;

	status = init_notifications();
	if (status < B_OK) {
		// If this fails, it just means there won't be any notifications,
		// it's not a fatal error.
		dprintf("networking stack notifications could not be initialized: %s\n",
			strerror(status));
	}

	module_info* dummy;
	status = get_module(NET_SOCKET_MODULE_NAME, &dummy);
	if (status != B_OK)
		goto err3;

	mutex_init(&sChainLock, "net chains");
	mutex_init(&sInitializeChainLock, "net intialize chains");

	sFamilies = hash_init(10, offsetof(struct family, next),
		&family::Compare, &family::Hash);
	if (sFamilies == NULL) {
		status = B_NO_MEMORY;
		goto err5;
	}

	sProtocolChains = hash_init(10, offsetof(struct chain, next),
		&chain::Compare, &chain::Hash);
	if (sProtocolChains == NULL) {
		status = B_NO_MEMORY;
		goto err6;
	}

	sDatalinkProtocolChains = hash_init(10, offsetof(struct chain, next),
		&chain::Compare, &chain::Hash);
	if (sDatalinkProtocolChains == NULL) {
		status = B_NO_MEMORY;
		goto err7;
	}

	sReceivingProtocolChains = hash_init(10, offsetof(struct chain, next),
		&chain::Compare, &chain::Hash);
	if (sReceivingProtocolChains == NULL) {
		status = B_NO_MEMORY;
		goto err8;
	}

	sInitialized = true;

	link_init();
	scan_modules("network/protocols");
	scan_modules("network/datalink_protocols");

	// TODO: for now!
	register_domain_datalink_protocols(AF_INET, IFT_LOOP,
		"network/datalink_protocols/loopback_frame/v1", NULL);
	register_domain_datalink_protocols(AF_INET, IFT_ETHER,
		"network/datalink_protocols/ipv4_datagram/v1",
		"network/datalink_protocols/arp/v1",
		"network/datalink_protocols/ethernet_frame/v1", NULL);

	return B_OK;

err8:
	hash_uninit(sDatalinkProtocolChains);
err7:
	hash_uninit(sProtocolChains);
err6:
	hash_uninit(sFamilies);
err5:
	mutex_destroy(&sInitializeChainLock);
	mutex_destroy(&sChainLock);
	// release the reference taken by get_module() above
	put_module(NET_SOCKET_MODULE_NAME);
err3:
	uninit_timers();
err2:
	uninit_interfaces();
err1:
	uninit_domains();
	return status;
}
int rmnet_usb_ctrl_init(int no_rmnet_devs, int no_rmnet_insts_per_dev, unsigned long mux_info) { struct rmnet_ctrl_dev *dev; struct rmnet_ctrl_udev *cudev; int i, n; int status; int cmux_enabled; num_devs = no_rmnet_devs; insts_per_dev = no_rmnet_insts_per_dev; ctrl_devs = kzalloc(num_devs * sizeof(*ctrl_devs), GFP_KERNEL); if (!ctrl_devs) return -ENOMEM; for (i = 0; i < num_devs; i++) { ctrl_devs[i] = kzalloc(insts_per_dev * sizeof(*ctrl_devs[i]), GFP_KERNEL); if (!ctrl_devs[i]) return -ENOMEM; status = alloc_chrdev_region(&ctrldev_num[i], 0, insts_per_dev, rmnet_dev_names[i]); if (IS_ERR_VALUE(status)) { pr_err("ERROR:%s: alloc_chrdev_region() ret %i.\n", __func__, status); return status; } ctrldev_classp[i] = class_create(THIS_MODULE, rmnet_dev_names[i]); if (IS_ERR(ctrldev_classp[i])) { pr_err("ERROR:%s: class_create() ENOMEM\n", __func__); status = PTR_ERR(ctrldev_classp[i]); return status; } for (n = 0; n < insts_per_dev; n++) { dev = &ctrl_devs[i][n]; /*for debug purpose*/ snprintf(dev->name, CTRL_DEV_MAX_LEN, "%s%d", rmnet_dev_names[i], n); /* ctrl usb dev inits */ cmux_enabled = test_bit(i, &mux_info); if (n && cmux_enabled) /* for mux config one cudev maps to n dev */ goto skip_cudev_init; cudev = kzalloc(sizeof(*cudev), GFP_KERNEL); if (!cudev) { pr_err("Error allocating rmnet usb ctrl dev\n"); kfree(dev); return -ENOMEM; } cudev->rdev_num = i; cudev->wq = create_singlethread_workqueue(dev->name); if (!cudev->wq) { pr_err("unable to allocate workqueue"); kfree(cudev); kfree(dev); return -ENOMEM; } init_usb_anchor(&cudev->tx_submitted); init_usb_anchor(&cudev->rx_submitted); INIT_WORK(&cudev->get_encap_work, get_encap_work); status = rmnet_usb_ctrl_alloc_rx(cudev); if (status) { destroy_workqueue(cudev->wq); kfree(cudev); kfree(dev); return status; } skip_cudev_init: /* ctrl dev inits */ dev->cudev = cudev; if (cmux_enabled) { set_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status); set_bit(RMNET_CTRL_DEV_MUX_EN, &dev->cudev->status); } dev->ch_id = n; 
mutex_init(&dev->dev_lock); spin_lock_init(&dev->rx_lock); init_waitqueue_head(&dev->read_wait_queue); init_waitqueue_head(&dev->open_wait_queue); INIT_LIST_HEAD(&dev->rx_list); cdev_init(&dev->cdev, &ctrldev_fops); dev->cdev.owner = THIS_MODULE; status = cdev_add(&dev->cdev, (ctrldev_num[i] + n), 1); if (status) { pr_err("%s: cdev_add() ret %i\n", __func__, status); free_rmnet_ctrl_udev(dev->cudev); kfree(dev); return status; } dev->devicep = device_create(ctrldev_classp[i], NULL, (ctrldev_num[i] + n), NULL, "%s%d", rmnet_dev_names[i], n); if (IS_ERR(dev->devicep)) { pr_err("%s: device_create() returned %ld\n", __func__, PTR_ERR(dev->devicep)); cdev_del(&dev->cdev); free_rmnet_ctrl_udev(dev->cudev); kfree(dev); return PTR_ERR(dev->devicep); } /*create /sys/class/hsicctl/hsicctlx/modem_wait*/ status = device_create_file(dev->devicep, &dev_attr_modem_wait); if (status) { device_destroy(dev->devicep->class, dev->devicep->devt); cdev_del(&dev->cdev); free_rmnet_ctrl_udev(dev->cudev); kfree(dev); return status; } dev_set_drvdata(dev->devicep, dev); } } rmnet_usb_ctrl_debugfs_init(); pr_info("rmnet usb ctrl Initialized.\n"); return 0; }
/*
 * jpegdma_probe - Dma device probe method.
 * @pdev: Pointer Dma platform device.
 *
 * Allocates the driver context, acquires hardware resources in order
 * (memory ranges, regulators, clocks, QoS, VBIF, prefetch, IRQ), reads
 * the hardware capabilities, then brings up the V4L2 mem2mem and video
 * device nodes.  On any failure, resources are released in exact
 * reverse order of acquisition via the chained error labels below.
 *
 * Returns 0 on success or a negative errno.
 */
static int jpegdma_probe(struct platform_device *pdev)
{
	struct msm_jpegdma_device *jpegdma;
	int ret;

	dev_dbg(&pdev->dev, "jpeg v4l2 DMA probed\n");
	/* Jpeg dma device struct */
	jpegdma = kzalloc(sizeof(struct msm_jpegdma_device), GFP_KERNEL);
	if (!jpegdma)
		return -ENOMEM;

	mutex_init(&jpegdma->lock);

	init_completion(&jpegdma->hw_reset_completion);
	init_completion(&jpegdma->hw_halt_completion);
	jpegdma->dev = &pdev->dev;

	/* Get resources */
	ret = msm_jpegdma_hw_get_mem_resources(pdev, jpegdma);
	if (ret < 0)
		goto error_mem_resources;

	ret = msm_jpegdma_hw_get_regulators(jpegdma);
	if (ret < 0)
		goto error_get_regulators;

	ret = msm_jpegdma_hw_get_clocks(jpegdma);
	if (ret < 0)
		goto error_get_clocks;

	ret = msm_jpegdma_hw_get_qos(jpegdma);
	if (ret < 0)
		goto error_qos_get;

	ret = msm_jpegdma_hw_get_vbif(jpegdma);
	if (ret < 0)
		goto error_vbif_get;

	ret = msm_jpegdma_hw_get_prefetch(jpegdma);
	if (ret < 0)
		goto error_prefetch_get;

	ret = msm_jpegdma_hw_request_irq(pdev, jpegdma);
	if (ret < 0)
		goto error_hw_get_request_irq;

	/* capability failure shares the IRQ error label on purpose:
	 * the IRQ requested above must be released */
	ret = msm_jpegdma_hw_get_capabilities(jpegdma);
	if (ret < 0)
		goto error_hw_get_request_irq;

	/* mem2mem device */
	jpegdma->m2m_dev = v4l2_m2m_init(&msm_jpegdma_m2m_ops);
	if (IS_ERR(jpegdma->m2m_dev)) {
		dev_err(&pdev->dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(jpegdma->m2m_dev);
		goto error_m2m_init;
	}

	/* v4l2 device */
	ret = v4l2_device_register(&pdev->dev, &jpegdma->v4l2_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto error_v4l2_register;
	}

	jpegdma->video.fops = &fd_fops;
	jpegdma->video.ioctl_ops = &fd_ioctl_ops;
	jpegdma->video.minor = -1;
	jpegdma->video.release = video_device_release;
	jpegdma->video.v4l2_dev = &jpegdma->v4l2_dev;
	jpegdma->video.vfl_dir = VFL_DIR_M2M;
	jpegdma->video.vfl_type = VFL_TYPE_GRABBER;
	strlcpy(jpegdma->video.name, MSM_JPEGDMA_DRV_NAME,
		sizeof(jpegdma->video.name));
	ret = video_register_device(&jpegdma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register video device\n");
		goto error_video_register;
	}

	video_set_drvdata(&jpegdma->video, jpegdma);
	platform_set_drvdata(pdev, jpegdma);
	dev_dbg(&pdev->dev, "jpeg v4l2 DMA probe success\n");
	return 0;

	/* unwind in reverse order of acquisition */
error_video_register:
	v4l2_device_unregister(&jpegdma->v4l2_dev);
error_v4l2_register:
	v4l2_m2m_release(jpegdma->m2m_dev);
error_m2m_init:
	msm_jpegdma_hw_release_irq(jpegdma);
error_hw_get_request_irq:
	msm_jpegdma_hw_put_prefetch(jpegdma);
error_prefetch_get:
	msm_jpegdma_hw_put_vbif(jpegdma);
error_vbif_get:
	msm_jpegdma_hw_put_qos(jpegdma);
error_qos_get:
	msm_jpegdma_hw_put_clocks(jpegdma);
error_get_clocks:
	msm_jpegdma_hw_put_regulators(jpegdma);
error_get_regulators:
	msm_jpegdma_hw_release_mem_resources(jpegdma);
error_mem_resources:
	kfree(jpegdma);
	return ret;
}
/* Request the SBC firmware to enter passive standby (no payload). */
static int lpm_enter_passive_standby(void)
{
	struct lpm_internal_send_msg send_msg = {
		.command_id = LPM_MSG_ENTER_PASSIVE,
		.msg_size = 0
	};
	return lpm_exchange_msg(&send_msg, NULL);
}

/**
 * lpm_fw_proto_version() - To get firmware major protocol version
 *
 * return major protocol version of firmware
 */
char lpm_fw_proto_version(void)
{
	/* cached in stm_lpm_probe() from stm_lpm_get_version() */
	return lpm_drv->fw_major_ver;
}

/**
 * stm_lpm_probe() - Probe function of driver
 * @client_data: i2c client data
 * @id: i2c device id
 *
 * Allocates the global driver data (lpm_drv), resolves the SBC i2c
 * adapter from the client data, queries the firmware version and
 * optionally starts the front-panel power-key monitor.
 *
 * Return - 0 on success
 * Return - negative error on failure
 */
static int __init stm_lpm_probe(struct i2c_client *client_data,
				const struct i2c_device_id *id)
{
	struct stm_lpm_i2c_data *i2c_data;
	int err = 0;
	struct stm_lpm_version driver_ver, fw_ver;

	lpm_debug("stm lpm probe \n");
	/* Allocate data structure */
	lpm_drv = kzalloc(sizeof(struct stm_lpm_driver_data), GFP_KERNEL);
	if (unlikely(lpm_drv == NULL)) {
		pr_err("%s: Request memory not done\n", __func__);
		return -ENOMEM;
	}

	i2c_data = i2c_get_clientdata(client_data);
	if (unlikely(i2c_data == NULL)) {
		pr_err("No i2c_bus data\n");
		err = -ENOENT;
		goto exit;
	}

	lpm_drv->i2c_sbc_adapter = i2c_data->i2c_adap;
	if (lpm_drv->i2c_sbc_adapter == NULL) {
		pr_err("i2c adapter not found \n");
		err = -ENODEV;
		goto exit;
	}
	lpm_debug("stm lpm i2c adapter found at %d i2c is %x \n",
		  i2c_data->number_i2c,
		  (unsigned int)lpm_drv->i2c_sbc_adapter);
	/* Mark parent */
	client_data->dev.parent = &lpm_drv->i2c_sbc_adapter->dev;
	/* Mutex initialization */
	mutex_init(&lpm_drv->msg_protection_mutex);
	err = stm_lpm_get_version(&driver_ver, &fw_ver);
	if (unlikely(err < 0)) {
		pr_err("No SBC firmware available \n");
		goto exit;
	}
	/* cache the firmware protocol version for lpm_fw_proto_version() */
	lpm_drv->fw_major_ver = fw_ver.major_comm_protocol;
#ifdef CONFIG_STM_LPM_RD_MONITOR
	/* Start monitor front panel power key */
	err = lpm_start_power_monitor(client_data);
	/* NOTE(review): if this fails, err is returned below without
	 * freeing lpm_drv - presumably a leak; verify and unwind. */
#endif
	return err;
exit:
	kfree(lpm_drv);
	return err;
}

/**
 * stm_lpm_remove() - To free used resources
 * @client: i2c client data
 * Return code 0
 */
static int stm_lpm_remove(struct i2c_client *client)
{
	lpm_debug("stm_lpm_remove \n");
#ifdef CONFIG_STM_LPM_RD_MONITOR
	lpm_stop_power_monitor(client);
#endif
	kfree(lpm_drv);
	return 0;
}
static int tegra_ehci_probe(struct platform_device *pdev) { struct resource *res; struct usb_hcd *hcd; struct tegra_ehci_hcd *tegra; int err = 0; int irq; pr_info("%s: ehci.id = %d\n", __func__, pdev->id); device_ehci_shutdown = false; tegra = devm_kzalloc(&pdev->dev, sizeof(struct tegra_ehci_hcd), GFP_KERNEL); if (!tegra) { dev_err(&pdev->dev, "memory alloc failed\n"); return -ENOMEM; } mutex_init(&tegra->sync_lock); hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "unable to create HCD\n"); return -ENOMEM; } platform_set_drvdata(pdev, tegra); //+Sophia:0608 tegra->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(tegra->clk)) { dev_err(&pdev->dev, "Can't get ehci clock\n"); err = PTR_ERR(tegra->clk); goto fail_io; } err = clk_enable(tegra->clk); if (err) goto fail_clken; tegra_periph_reset_assert(tegra->clk); udelay(2); tegra_periph_reset_deassert(tegra->clk); udelay(2); //+Sophia:0608 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get I/O memory\n"); err = -ENXIO; goto fail_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(res->start, resource_size(res)); if (!hcd->regs) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); err = -ENOMEM; goto fail_io; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); err = -ENODEV; goto fail_irq; } set_irq_flags(irq, IRQF_VALID); tegra->irq = irq; tegra->phy = tegra_usb_phy_open(pdev); if (IS_ERR(tegra->phy)) { dev_err(&pdev->dev, "failed to open USB phy\n"); err = -ENXIO; goto fail_irq; } err = tegra_usb_phy_power_on(tegra->phy); if (err) { dev_err(&pdev->dev, "failed to power on the phy\n"); goto fail_phy; } err = tegra_usb_phy_init(tegra->phy); if (err) { dev_err(&pdev->dev, "failed to init the phy\n"); goto fail_phy; } err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_TRIGGER_HIGH); if (err) { dev_err(&pdev->dev, "Failed to add USB HCD, 
error=%d\n", err); goto fail_phy; } err = enable_irq_wake(tegra->irq); if (err < 0) { dev_warn(&pdev->dev, "Couldn't enable USB host mode wakeup, irq=%d, " "error=%d\n", irq, err); err = 0; tegra->irq = 0; } tegra->ehci = hcd_to_ehci(hcd); //htc++ #ifdef CONFIG_QCT_9K_MODEM if (Modem_is_QCT_MDM9K()) { extern struct platform_device tegra_ehci2_device; if (&tegra_ehci2_device == pdev) { mdm_hsic_ehci_hcd = tegra->ehci; mdm_hsic_usb_hcd = hcd; mdm_hsic_phy = tegra->phy; pr_info("%s:: mdm_hsic_ehci_hcd = %x, mdm_hsic_usb_hcd = %x, mdm_hsic_phy = %x\n", __func__, (unsigned int)mdm_hsic_ehci_hcd, (unsigned int)mdm_hsic_usb_hcd, (unsigned int)mdm_hsic_phy); } } #endif //CONFIG_QCT_9K_MODEM //htc-- #ifdef CONFIG_USB_OTG_UTILS if (tegra_usb_phy_otg_supported(tegra->phy)) { tegra->transceiver = otg_get_transceiver(); if (tegra->transceiver) otg_set_host(tegra->transceiver, &hcd->self); } #endif return err; fail_phy: tegra_usb_phy_close(tegra->phy); fail_irq: iounmap(hcd->regs); fail_clken: clk_put(tegra->clk); fail_io: usb_put_hcd(hcd); return err; }
static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; struct address_space *vb_mapping; struct balloon_dev_info *vb_devinfo; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); if (!vb) { err = -ENOMEM; goto out; } vb->num_pages = 0; mutex_init(&vb->balloon_lock); init_waitqueue_head(&vb->config_change); init_waitqueue_head(&vb->acked); vb->vdev = vdev; vb->need_stats_update = 0; vb_devinfo = balloon_devinfo_alloc(vb); if (IS_ERR(vb_devinfo)) { err = PTR_ERR(vb_devinfo); goto out_free_vb; } vb_mapping = balloon_mapping_alloc(vb_devinfo, (balloon_compaction_check()) ? &virtio_balloon_aops : NULL); if (IS_ERR(vb_mapping)) { /* * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off. */ err = PTR_ERR(vb_mapping); if (err != -EOPNOTSUPP) goto out_free_vb_devinfo; } vb->vb_dev_info = vb_devinfo; err = init_vqs(vb); if (err) goto out_free_vb_mapping; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); goto out_del_vqs; } return 0; out_del_vqs: vdev->config->del_vqs(vdev); out_free_vb_mapping: balloon_mapping_free(vb_mapping); out_free_vb_devinfo: balloon_devinfo_free(vb_devinfo); out_free_vb: kfree(vb); out: return err; }
/*
 * zfs_sb_create - allocate and initialize a zfs_sb_t for dataset @osname.
 *
 * Opens the objset (always claiming read-only so snapshots can be
 * mounted), reads the ZPL properties that shape runtime behavior
 * (version, normalization, utf8only, casesensitivity), sets up the
 * system-attribute tables and looks up the master-node objects (root,
 * unlinked set, quota objects, FUID tables, shares dir), then creates
 * the locks and lists protecting the znode state.
 *
 * On success *zsbp receives the new zfs_sb_t and 0 is returned; on
 * failure the objset is disowned, memory freed, *zsbp set to NULL and
 * an errno returned.
 *
 * Fix vs. previous revision: the version-mismatch printk had its
 * newline embedded mid-sentence ("pool\n. Pool must be ...") and the
 * message never ended with '\n'; the text is unchanged but correctly
 * punctuated/terminated now.
 */
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = ENOTSUP;
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	/* quota objects may legitimately not exist yet: ENOENT is fine */
	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
static int iphone_wm8758_register(void) { int ret; int i; struct snd_soc_codec *codec = &priv.codec; pr_debug("ENTER iphone_wm8758_audio_probe\n"); mutex_init(&codec->mutex); INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); codec->name = "wm8758"; codec->owner = THIS_MODULE; codec->dai = &iphone_wm8758_dai; codec->num_dai = 1; codec->reg_cache_size = ARRAY_SIZE(priv.reg_cache); codec->reg_cache = &priv.reg_cache; iphone_wm8758_dai.private_data = &priv; iphone_wm8758_dai.dev = codec->dev; memcpy(codec->reg_cache, wm8978_reg, sizeof(wm8978_reg)); ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); goto err; } ret = snd_soc_register_codec(codec); if (ret != 0) { dev_err(codec->dev, "Failed to register codec: %d\n", ret); goto err; } ret = snd_soc_register_dai(&iphone_wm8758_dai); if (ret != 0) { dev_err(codec->dev, "Failed to register DAI: %d\n", ret); snd_soc_unregister_codec(codec); goto err_codec; } priv.bb_volume_cache[2] = 100; priv.bb_volume_cache[0] = 68; snd_soc_write(codec, RESET, 0x1ff); /* Reset */ snd_soc_write(codec, LOUT1VOL, 0xc0); snd_soc_write(codec, ROUT1VOL, 0x1c0); snd_soc_write(codec, LOUT2VOL, 0xb9); snd_soc_write(codec, ROUT2VOL, 0x1b9); snd_soc_write(codec, BIASCTL, 0x100); /* BIASCUT = 1 */ snd_soc_write(codec, PWRMGMT1, 0x2d); /* BIASEN = 1, PLLEN = 1, BUFIOEN = 1, VMIDSEL = 1 */ snd_soc_write(codec, PWRMGMT2, 0x180); snd_soc_write(codec, PWRMGMT3, 0x6f); snd_soc_write(codec, AINTFCE, 0x10); /* 16-bit, I2S format */ snd_soc_write(codec, COMPAND, 0x0); snd_soc_write(codec, CLKGEN, 0x14d); snd_soc_write(codec, SRATECTRL, 0x0); snd_soc_write(codec, GPIOCTL, 0x0); snd_soc_write(codec, JACKDETECT0, 0x0); snd_soc_write(codec, DACCTRL, 0x3); snd_soc_write(codec, LDACVOL, 0xff); snd_soc_write(codec, RDACVOL, 0x1ff); snd_soc_write(codec, JACKDETECT1, 0x0); snd_soc_write(codec, ADCCTL, 0x0); snd_soc_write(codec, LADCVOL, 0xff); 
snd_soc_write(codec, RADCVOL, 0xff); snd_soc_write(codec, EQ1, 0x12c); snd_soc_write(codec, EQ2, 0x2c); snd_soc_write(codec, EQ3, 0x2c); snd_soc_write(codec, EQ4, 0x2c); snd_soc_write(codec, EQ5, 0x2c); snd_soc_write(codec, DACLIMIT1, 0x32); snd_soc_write(codec, DACLIMIT2, 0x0); snd_soc_write(codec, NOTCH1, 0x0); snd_soc_write(codec, NOTCH2, 0x0); snd_soc_write(codec, NOTCH3, 0x0); snd_soc_write(codec, NOTCH4, 0x0); snd_soc_write(codec, PLLN, 0xa); snd_soc_write(codec, PLLK1, 0x1); snd_soc_write(codec, PLLK2, 0x1fd); snd_soc_write(codec, PLLK3, 0x1e8); snd_soc_write(codec, THREEDCTL, 0x0); snd_soc_write(codec, OUT4ADC, 0x0); snd_soc_write(codec, BEEPCTRL, 0x0); snd_soc_write(codec, INCTRL, 0x0); snd_soc_write(codec, LINPGAGAIN, 0x40); snd_soc_write(codec, RINPGAGAIN, 0x140); snd_soc_write(codec, LADCBOOST, 0x0); snd_soc_write(codec, RADCBOOST, 0x0); snd_soc_write(codec, OUTCTRL, 0x186); /* Thermal shutdown, DACL2RMIX = 1, DACR2LMIX = 1, SPKBOOST = 1 */ snd_soc_write(codec, LOUTMIX, 0x15); snd_soc_write(codec, ROUTMIX, 0x15); snd_soc_write(codec, OUT3MIX, 0x40); snd_soc_write(codec, OUT4MIX, 0x40); snd_soc_write(codec, WMREG_3E, 0x8c90); for (i = 0; i < ARRAY_SIZE(update_reg); i++) ((u16 *)codec->reg_cache)[update_reg[i]] |= 0x100; dev_info(codec->dev, "DAI and codec registered\n"); return 0; err_codec: snd_soc_unregister_codec(codec); err: return ret; }
/* * probe for dryice rtc device */ static int dryice_rtc_probe(struct platform_device *pdev) { struct resource *res; struct imxdi_dev *imxdi; int rc; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; imxdi = devm_kzalloc(&pdev->dev, sizeof(*imxdi), GFP_KERNEL); if (!imxdi) return -ENOMEM; imxdi->pdev = pdev; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) return -EBUSY; imxdi->ioaddr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (imxdi->ioaddr == NULL) return -ENOMEM; imxdi->irq = platform_get_irq(pdev, 0); if (imxdi->irq < 0) return imxdi->irq; init_waitqueue_head(&imxdi->write_wait); INIT_WORK(&imxdi->work, dryice_work); mutex_init(&imxdi->write_mutex); imxdi->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(imxdi->clk)) return PTR_ERR(imxdi->clk); clk_enable(imxdi->clk); /* * Initialize dryice hardware */ /* mask all interrupts */ __raw_writel(0, imxdi->ioaddr + DIER); rc = devm_request_irq(&pdev->dev, imxdi->irq, dryice_norm_irq, IRQF_SHARED, pdev->name, imxdi); if (rc) { dev_warn(&pdev->dev, "interrupt not available.\n"); goto err; } /* put dryice into valid state */ if (__raw_readl(imxdi->ioaddr + DSR) & DSR_NVF) { rc = di_write_wait(imxdi, DSR_NVF | DSR_SVF, DSR); if (rc) goto err; } /* initialize alarm */ rc = di_write_wait(imxdi, DCAMR_UNSET, DCAMR); if (rc) goto err; rc = di_write_wait(imxdi, 0, DCALR); if (rc) goto err; /* clear alarm flag */ if (__raw_readl(imxdi->ioaddr + DSR) & DSR_CAF) { rc = di_write_wait(imxdi, DSR_CAF, DSR); if (rc) goto err; } /* the timer won't count if it has never been written to */ if (__raw_readl(imxdi->ioaddr + DTCMR) == 0) { rc = di_write_wait(imxdi, 0, DTCMR); if (rc) goto err; } /* start keeping time */ if (!(__raw_readl(imxdi->ioaddr + DCR) & DCR_TCE)) { rc = di_write_wait(imxdi, __raw_readl(imxdi->ioaddr + DCR) | DCR_TCE, DCR); if (rc) goto err; } platform_set_drvdata(pdev, imxdi); imxdi->rtc = rtc_device_register(pdev->name, &pdev->dev, 
&dryice_rtc_ops, THIS_MODULE); if (IS_ERR(imxdi->rtc)) { rc = PTR_ERR(imxdi->rtc); goto err; } return 0; err: clk_disable(imxdi->clk); clk_put(imxdi->clk); return rc; }
/*
 * vsp_init - allocate and map the VSP private state for a DRM device.
 *
 * Creates the private structure, exposes the "vsp_pmstate" sysfs file,
 * allocates three TTM buffer objects (command queue, ack queue, settings)
 * and kmaps each of them, then initializes locks and the deferred-work
 * items used by the power-down and IRQ paths.
 *
 * Returns 0 on success, -1 on any failure (cleanup delegated to
 * vsp_deinit()). NOTE(review): -1 is not a proper errno; callers that
 * propagate this upward may want -ENOMEM/-EIO — confirm call sites.
 */
int vsp_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct vsp_private *vsp_priv;
	bool is_iomem;
	int ret;
	unsigned int context_size;	/* NOTE(review): unused in this function */
	int i = 0;

	VSP_DEBUG("init vsp private data structure\n");
	/* kmalloc + memset rather than kzalloc; zeroing is required because
	 * many fields are only conditionally written below. */
	vsp_priv = kmalloc(sizeof(struct vsp_private), GFP_KERNEL);
	if (vsp_priv == NULL)
		return -1;

	memset(vsp_priv, 0, sizeof(*vsp_priv));

	/* get device --> drm_device --> drm_psb_private --> vsp_priv
	 * for psb_vsp_pmstate_show: vsp_pmpolicy
	 * if not pci_set_drvdata, can't get drm_device from device
	 */
	/* pci_set_drvdata(dev->pdev, dev); */
	if (device_create_file(&dev->pdev->dev,
			       &dev_attr_vsp_pmstate))
		DRM_ERROR("TOPAZ: could not create sysfs file\n");

	vsp_priv->sysfs_pmstate = sysfs_get_dirent(
		dev->pdev->dev.kobj.sd, NULL, "vsp_pmstate");

	/* Initial software state: no firmware, device powered down. */
	vsp_priv->vsp_cmd_num = 0;
	vsp_priv->fw_loaded = VSP_FW_NONE;
	vsp_priv->current_sequence = 0;
	vsp_priv->vsp_state = VSP_STATE_DOWN;
	vsp_priv->dev = dev;

	atomic_set(&dev_priv->vsp_mmu_invaldc, 0);

	/* Publish the private pointer early so vsp_deinit() can find it on
	 * the out_clean path. */
	dev_priv->vsp_private = vsp_priv;

	vsp_priv->cmd_queue_sz = VSP_CMD_QUEUE_SIZE *
		sizeof(struct vss_command_t);
	/* ttm_buffer_object_create() lost one argument in 3.8 — both call
	 * forms request un-evictable MMU-backed kernel buffers. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL,
				       &vsp_priv->cmd_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL,
				       &vsp_priv->cmd_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd queue\n");
		goto out_clean;
	}

	vsp_priv->ack_queue_sz = VSP_ACK_QUEUE_SIZE *
		sizeof(struct vss_response_t);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL,
				       &vsp_priv->ack_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL,
				       &vsp_priv->ack_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd ack queue\n");
		goto out_clean;
	}

	/* Create setting buffer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL,
				       &vsp_priv->setting_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL,
				       &vsp_priv->setting_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP setting buffer\n");
		goto out_clean;
	}

	/* map cmd queue */
	ret = ttm_bo_kmap(vsp_priv->cmd_queue_bo, 0,
			  vsp_priv->cmd_queue_bo->num_pages,
			  &vsp_priv->cmd_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
		/* NOTE(review): unref before kunmap, and kunmap of a map
		 * that just failed, looks inverted/unnecessary — confirm
		 * against TTM semantics; same pattern repeats below. */
		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
		goto out_clean;
	}

	vsp_priv->cmd_queue = ttm_kmap_obj_virtual(&vsp_priv->cmd_kmap,
						   &is_iomem);

	/* map ack queue */
	ret = ttm_bo_kmap(vsp_priv->ack_queue_bo, 0,
			  vsp_priv->ack_queue_bo->num_pages,
			  &vsp_priv->ack_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->ack_queue_bo);
		ttm_bo_kunmap(&vsp_priv->ack_kmap);
		goto out_clean;
	}

	vsp_priv->ack_queue = ttm_kmap_obj_virtual(&vsp_priv->ack_kmap,
						   &is_iomem);

	/* map vsp setting */
	ret = ttm_bo_kmap(vsp_priv->setting_bo, 0,
			  vsp_priv->setting_bo->num_pages,
			  &vsp_priv->setting_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap setting_bo failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->setting_bo);
		ttm_bo_kunmap(&vsp_priv->setting_kmap);
		goto out_clean;
	}
	vsp_priv->setting = ttm_kmap_obj_virtual(&vsp_priv->setting_kmap,
						 &is_iomem);

	/* Clear VP8 per-context bookkeeping (one extra slot beyond max). */
	for (i = 0; i < MAX_VP8_CONTEXT_NUM + 1; i++)
		vsp_priv->vp8_filp[i] = NULL;
	vsp_priv->context_vp8_num = 0;
	vsp_priv->context_vpp_num = 0;
	vsp_priv->vp8_cmd_num = 0;

	spin_lock_init(&vsp_priv->lock);
	mutex_init(&vsp_priv->vsp_mutex);

	INIT_DELAYED_WORK(&vsp_priv->vsp_suspend_wq, &psb_powerdown_vsp);
	INIT_DELAYED_WORK(&vsp_priv->vsp_irq_wq, &vsp_irq_task);

	return 0;

out_clean:
	/* vsp_deinit() tears down whatever was set up above via
	 * dev_priv->vsp_private. */
	vsp_deinit(dev);
	return -1;
}
static int lm3630_probe(struct i2c_client *i2c_dev, const struct i2c_device_id *id) { struct backlight_platform_data *pdata; struct lm3630_device *dev; struct backlight_device *bl_dev; struct backlight_properties props; int err; pr_info("[LCD][DEBUG] %s: i2c probe start\n", __func__); #ifdef CONFIG_OF if (&i2c_dev->dev.of_node) { pdata = devm_kzalloc(&i2c_dev->dev, sizeof(struct backlight_platform_data), GFP_KERNEL); if (!pdata) { pr_err("%s: Failed to allocate memory\n", __func__); return -ENOMEM; } err = lm3630_parse_dt(&i2c_dev->dev, pdata); if (err != 0) return err; } else { pdata = i2c_dev->dev.platform_data; } #else pdata = i2c_dev->dev.platform_data; #endif pr_debug("[LCD][DEBUG] %s: gpio = %d\n", __func__,pdata->gpio); if (pdata->gpio && gpio_request(pdata->gpio, "lm3630 reset") != 0) { return -ENODEV; } lm3630_i2c_client = i2c_dev; dev = kzalloc(sizeof(struct lm3630_device), GFP_KERNEL); if (dev == NULL) { dev_err(&i2c_dev->dev, "fail alloc for lm3630_device\n"); return 0; } main_lm3630_dev = dev; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = MAX_BRIGHTNESS_LM3630; bl_dev = backlight_device_register(I2C_BL_NAME, &i2c_dev->dev, NULL, &lm3630_bl_ops, &props); bl_dev->props.max_brightness = MAX_BRIGHTNESS_LM3630; bl_dev->props.brightness = DEFAULT_BRIGHTNESS; bl_dev->props.power = FB_BLANK_UNBLANK; dev->bl_dev = bl_dev; dev->client = i2c_dev; dev->gpio = pdata->gpio; dev->max_current = pdata->max_current; dev->min_brightness = pdata->min_brightness; dev->default_brightness = pdata->default_brightness; dev->max_brightness = pdata->max_brightness; dev->blmap_size = pdata->blmap_size; if (dev->blmap_size) { dev->blmap = kzalloc(sizeof(char) * dev->blmap_size, GFP_KERNEL); if (!dev->blmap) { pr_err("%s: Failed to allocate memory\n", __func__); return -ENOMEM; } memcpy(dev->blmap, pdata->blmap, dev->blmap_size); } else { dev->blmap = NULL; } #if !defined(CONFIG_MACH_MSM8974_VU3_KR) if 
(gpio_get_value(dev->gpio)) backlight_status = BL_ON; else #endif backlight_status = BL_OFF; i2c_set_clientdata(i2c_dev, dev); mutex_init(&dev->bl_mutex); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_level); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_backlight_on_off); err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_exp_min_value); #if defined(CONFIG_BACKLIGHT_CABC_DEBUG_ENABLE) err = device_create_file(&i2c_dev->dev, &dev_attr_lm3630_pwm); #endif #if defined(CONFIG_MACH_LGE) && !defined(CONFIG_MACH_MSM8974_VU3_KR) && !defined(CONFIG_MACH_MSM8974_Z_KR) && !defined(CONFIG_MACH_MSM8974_Z_US) && !defined(CONFIG_OLED_SUPPORT) if (!lge_get_cont_splash_enabled()) lm3630_lcd_backlight_set_level(0); #endif return 0; }
/*
 * dmu_objset_open_impl - construct the in-core objset_t for a dataset.
 *
 * Reads (or zero-fills, for a hole blkptr) the objset phys buffer from
 * the ARC, registers the property callbacks that keep the in-core copy
 * of checksum/compression/etc. in sync, and opens the special dnodes.
 * On success *osp receives the new objset; on failure the partially
 * built objset is torn down and an error is returned.
 */
int dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		/* Root bp points at real data: read it synchronously. */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;

		/* ds == NULL means we are opening the meta-objset. */
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			/* Copy the old (smaller) phys into a full-size,
			 * zero-padded buffer and drop the original. */
			arc_buf_t *buf = arc_alloc_buf(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		/* Hole bp: brand-new objset, allocate a zeroed phys. */
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_alloc_buf(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}
		/* Chained registrations: each runs only if the previous
		 * one succeeded; first failure short-circuits the rest. */
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			/* Writable datasets also track the write-side
			 * properties below; snapshots never write. */
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset.
		 * No dataset, so no property callbacks: use fixed
		 * conservative defaults for all tunables. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	/* Snapshots keep a zeroed in-core ZIL header (they have no log). */
	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	/* Old (pre-USERSPACE) phys layouts lack the accounting dnodes. */
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}
static int elo_connect(struct serio *serio, struct serio_driver *drv) { struct elo *elo; struct input_dev *input_dev; int err; elo = kzalloc(sizeof(struct elo), GFP_KERNEL); input_dev = input_allocate_device(); if (!elo || !input_dev) { err = -ENOMEM; goto fail1; } elo->serio = serio; elo->id = serio->id.id; elo->dev = input_dev; elo->expected_packet = ELO10_TOUCH_PACKET; mutex_init(&elo->cmd_mutex); init_completion(&elo->cmd_done); snprintf(elo->phys, sizeof(elo->phys), "%s/input0", serio->phys); input_dev->name = "Elo Serial TouchScreen"; input_dev->phys = elo->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_ELO; input_dev->id.product = elo->id; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); serio_set_drvdata(serio, elo); err = serio_open(serio, drv); if (err) goto fail2; switch (elo->id) { case 0: /* 10-byte protocol */ if (elo_setup_10(elo)) goto fail3; break; case 1: /* 6-byte protocol */ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0); case 2: /* 4-byte protocol */ input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0); input_set_abs_params(input_dev, ABS_Y, 96, 4000, 0, 0); break; case 3: /* 3-byte protocol */ input_set_abs_params(input_dev, ABS_X, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 255, 0, 0); break; } err = input_register_device(elo->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(elo); return err; }
/*
 * max8997_muic_probe - platform probe for the MAX8997 MUIC block.
 *
 * Allocates driver state, maps and requests all MUIC interrupts,
 * registers the extcon device, applies platform-data register defaults
 * and USB/UART path selection (falling back to built-in defaults), and
 * schedules delayed cable detection.
 *
 * Returns 0 on success or a negative errno; err_irq frees only the
 * IRQs requested so far (loop index i tracks progress).
 */
static int max8997_muic_probe(struct platform_device *pdev)
{
	struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
	struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
	struct max8997_muic_info *info;
	int delay_jiffies;
	int ret, i;

	info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;
	info->muic = max8997->muic;

	platform_set_drvdata(pdev, info);
	mutex_init(&info->mutex);

	INIT_WORK(&info->irq_work, max8997_muic_irq_work);

	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
		struct max8997_muic_irq *muic_irq = &muic_irqs[i];
		unsigned int virq = 0;

		/* Map the chip-local IRQ number into the parent's domain. */
		virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
		if (!virq) {
			ret = -EINVAL;
			goto err_irq;
		}
		muic_irq->virq = virq;

		ret = request_threaded_irq(virq, NULL,
				max8997_muic_irq_handler,
				IRQF_NO_SUSPEND,
				muic_irq->name, info);
		if (ret) {
			dev_err(&pdev->dev,
				"failed: irq request (IRQ: %d, error :%d)\n",
				muic_irq->irq, ret);
			goto err_irq;
		}
	}

	/* External connector */
	info->edev = devm_extcon_dev_allocate(&pdev->dev,
					      max8997_extcon_cable);
	if (IS_ERR(info->edev)) {
		dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
		ret = -ENOMEM;
		goto err_irq;
	}

	ret = devm_extcon_dev_register(&pdev->dev, info->edev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register extcon device\n");
		goto err_irq;
	}

	if (pdata && pdata->muic_pdata) {
		struct max8997_muic_platform_data *muic_pdata =
			pdata->muic_pdata;

		/* Initialize registers according to platform data */
		for (i = 0; i < muic_pdata->num_init_data; i++) {
			max8997_write_reg(info->muic,
					muic_pdata->init_data[i].addr,
					muic_pdata->init_data[i].data);
		}

		/*
		 * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
		 * h/w path of COMP2/COMN1 on CONTROL1 register.
		 */
		if (muic_pdata->path_uart)
			info->path_uart = muic_pdata->path_uart;
		else
			info->path_uart = CONTROL1_SW_UART;

		if (muic_pdata->path_usb)
			info->path_usb = muic_pdata->path_usb;
		else
			info->path_usb = CONTROL1_SW_USB;

		/*
		 * Default delay time for detecting cable state
		 * after certain time.
		 */
		if (muic_pdata->detcable_delay_ms)
			delay_jiffies =
				msecs_to_jiffies(muic_pdata->detcable_delay_ms);
		else
			delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
	} else {
		/* No platform data: built-in defaults for path and delay. */
		info->path_uart = CONTROL1_SW_UART;
		info->path_usb = CONTROL1_SW_USB;
		delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
	}

	/* Set initial path for UART */
	max8997_muic_set_path(info, info->path_uart, true);

	/* Set ADC debounce time */
	max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);

	/*
	 * Detect accessory after completing the initialization of platform
	 *
	 * - Use delayed workqueue to detect cable state and then
	 * notify cable state to notifiee/platform through uevent.
	 * After completing the booting of platform, the extcon provider
	 * driver should notify cable state to upper layer.
	 */
	INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
	queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
			delay_jiffies);

	return 0;

err_irq:
	/* Free only the IRQs requested before the failure. After the loop
	 * completes, i == ARRAY_SIZE(muic_irqs), so all of them are freed. */
	while (--i >= 0)
		free_irq(muic_irqs[i].virq, info);
	return ret;
}