static int tegra_otg_probe(struct platform_device *pdev)
{
	struct tegra_otg_data *tegra;
	struct resource *res;
	int err;

	tegra = kzalloc(sizeof(struct tegra_otg_data), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->otg.dev = &pdev->dev;
	tegra->otg.label = "tegra-otg";
	tegra->otg.state = OTG_STATE_UNDEFINED;
	tegra->otg.set_host = tegra_otg_set_host;
	tegra->otg.set_peripheral = tegra_otg_set_peripheral;
	tegra->otg.set_suspend = tegra_otg_set_suspend;
	tegra->otg.set_power = tegra_otg_set_power;
	spin_lock_init(&tegra->lock);
	wake_lock_init(&usb_wake_lock, WAKE_LOCK_SUSPEND, "tegra_otg");

	platform_set_drvdata(pdev, tegra);

	tegra->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(tegra->clk)) {
		dev_err(&pdev->dev, "Can't get otg clock\n");
		err = PTR_ERR(tegra->clk);
		goto err_clk;
	}

	err = clk_enable(tegra->clk);
	if (err)
		goto err_clken;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto err_io;
	}
	tegra->regs = ioremap(res->start, resource_size(res));
	if (!tegra->regs) {
		err = -ENOMEM;
		goto err_io;
	}

	tegra->otg.state = OTG_STATE_A_SUSPEND;

	err = otg_set_transceiver(&tegra->otg);
	if (err) {
		dev_err(&pdev->dev, "can't register transceiver (%d)\n", err);
		goto err_otg;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = -ENXIO;
		goto err_irq;
	}
	tegra->irq = res->start;
	err = request_threaded_irq(tegra->irq, tegra_otg_irq, NULL,
				   IRQF_SHARED, "tegra-otg", tegra);
	if (err) {
		dev_err(&pdev->dev, "Failed to register IRQ\n");
		goto err_irq;
	}
	INIT_WORK(&tegra->work, irq_work);

	dev_info(&pdev->dev, "otg transceiver registered\n");
	return 0;

err_irq:
	otg_set_transceiver(NULL);
err_otg:
	iounmap(tegra->regs);
err_io:
	clk_disable(tegra->clk);
err_clken:
	clk_put(tegra->clk);
err_clk:
	platform_set_drvdata(pdev, NULL);
	kfree(tegra);
	return err;
}
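/*
 * A minimal sketch (not from the original driver) of the matching remove
 * path, assuming the same tegra_otg_data layout used by the probe above;
 * it simply unwinds the probe steps in reverse order, mirroring the
 * error labels of tegra_otg_probe().
 */
static int tegra_otg_remove(struct platform_device *pdev)
{
	struct tegra_otg_data *tegra = platform_get_drvdata(pdev);

	free_irq(tegra->irq, tegra);	/* undo request_threaded_irq() */
	otg_set_transceiver(NULL);	/* undo otg_set_transceiver() */
	iounmap(tegra->regs);		/* undo ioremap() */
	clk_disable(tegra->clk);	/* undo clk_enable() */
	clk_put(tegra->clk);		/* undo clk_get() */
	platform_set_drvdata(pdev, NULL);
	kfree(tegra);

	return 0;
}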
static int __init msm_serial_hsl_probe(struct platform_device *pdev)
{
	struct msm_hsl_port *msm_hsl_port;
	struct resource *uart_resource;
	struct resource *gsbi_resource;
	struct uart_port *port;
	int ret;

	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
		return -ENXIO;

	printk(KERN_INFO "msm_serial_hsl: detected port #%d\n", pdev->id);

	port = get_port_from_line(pdev->id);
	port->dev = &pdev->dev;
	msm_hsl_port = UART_TO_MSM(port);

	if (msm_serial_hsl_has_gsbi()) {
		gsbi_resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     "gsbi_resource");
		if (unlikely(!gsbi_resource))
			return -ENXIO;
		msm_hsl_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
		msm_hsl_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
	} else {
		msm_hsl_port->clk = clk_get(&pdev->dev, "uartdm_clk");
		msm_hsl_port->pclk = NULL;
	}

	if (unlikely(IS_ERR(msm_hsl_port->clk))) {
		printk(KERN_ERR "%s: Error getting clk\n", __func__);
		return PTR_ERR(msm_hsl_port->clk);
	}
	if (unlikely(IS_ERR(msm_hsl_port->pclk))) {
		printk(KERN_ERR "%s: Error getting pclk\n", __func__);
		return PTR_ERR(msm_hsl_port->pclk);
	}

	uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						     "uartdm_resource");
	if (unlikely(!uart_resource)) {
		printk(KERN_ERR "getting uartdm_resource failed\n");
		return -ENXIO;
	}
	port->mapbase = uart_resource->start;

	/*
	 * uart_port::irq is unsigned, so check the platform_get_irq()
	 * return value in a signed local before storing it.
	 */
	ret = platform_get_irq(pdev, 0);
	if (unlikely(ret < 0)) {
		printk(KERN_ERR "%s: getting irq failed\n", __func__);
		return -ENXIO;
	}
	port->irq = ret;

	device_set_wakeup_capable(&pdev->dev, 1);
	platform_set_drvdata(pdev, port);
	pm_runtime_enable(port->dev);

	ret = uart_add_one_port(&msm_hsl_uart_driver, port);
	return ret;
}
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
	handle_t *handle;
	int inline_size, value_len, needed_blocks;
	size_t i_size;
	void *value = NULL;
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_info i = {
		.name_index = EXT4_XATTR_INDEX_SYSTEM,
		.name = EXT4_XATTR_SYSTEM_DATA,
	};

	needed_blocks = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks);
	if (IS_ERR(handle))
		return;

	down_write(&EXT4_I(inode)->xattr_sem);
	if (!ext4_has_inline_data(inode)) {
		*has_inline = 0;
		ext4_journal_stop(handle);
		return;
	}

	if (ext4_orphan_add(handle, inode))
		goto out;

	if (ext4_get_inode_loc(inode, &is.iloc))
		goto out;

	down_write(&EXT4_I(inode)->i_data_sem);
	i_size = inode->i_size;
	inline_size = ext4_get_inline_size(inode);
	EXT4_I(inode)->i_disksize = i_size;

	if (i_size < inline_size) {
		/* Clear the content in the xattr space. */
		if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
			if (ext4_xattr_ibody_find(inode, &i, &is))
				goto out_error;

			BUG_ON(is.s.not_found);

			value_len = le32_to_cpu(is.s.here->e_value_size);
			value = kmalloc(value_len, GFP_NOFS);
			if (!value)
				goto out_error;

			if (ext4_xattr_ibody_get(inode, i.name_index, i.name,
						 value, value_len))
				goto out_error;

			i.value = value;
			i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ?
					i_size - EXT4_MIN_INLINE_DATA_SIZE : 0;
			if (ext4_xattr_ibody_inline_set(handle, inode,
							&i, &is))
				goto out_error;
		}

		/* Clear the content within i_blocks. */
		if (i_size < EXT4_MIN_INLINE_DATA_SIZE) {
			void *p = (void *) ext4_raw_inode(&is.iloc)->i_block;
			memset(p + i_size, 0,
			       EXT4_MIN_INLINE_DATA_SIZE - i_size);
		}

		EXT4_I(inode)->i_inline_size = i_size <
					EXT4_MIN_INLINE_DATA_SIZE ?
					EXT4_MIN_INLINE_DATA_SIZE : i_size;
	}

out_error:
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	brelse(is.iloc.bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	kfree(value);
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	ext4_journal_stop(handle);
	return;
}
static int __init rtlx_module_init(void)
{
	struct device *dev;
	int i, err;

	if (!cpu_has_mipsmt) {
		printk("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (tclimit == 0) {
		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
		       "argument\n");
		return -ENODEV;
	}

	major = register_chrdev(0, module_name, &rtlx_fops);
	if (major < 0) {
		printk(register_chrdev_failed);
		return major;
	}

	/* initialise the wait queues */
	for (i = 0; i < RTLX_CHANNELS; i++) {
		init_waitqueue_head(&channel_wqs[i].rt_queue);
		init_waitqueue_head(&channel_wqs[i].lx_queue);
		atomic_set(&channel_wqs[i].in_open, 0);
		mutex_init(&channel_wqs[i].mutex);

		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
				    "%s%d", module_name, i);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out_chrdev;
		}
	}

	/* set up notifiers */
	notify.start = starting;
	notify.stop = stopping;
	vpe_notify(tclimit, &notify);

	if (cpu_has_vint)
		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
	else {
		pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
		err = -ENODEV;
		goto out_chrdev;
	}

	rtlx_irq.dev_id = rtlx;
	setup_irq(rtlx_irq_num, &rtlx_irq);

	return 0;

out_chrdev:
	for (i = 0; i < RTLX_CHANNELS; i++)
		device_destroy(mt_class, MKDEV(major, i));

	return err;
}
int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
{
	struct hfs_btree *tree;
	struct hfs_bnode *node, *new_node;
	int size, key_len, rec;
	int data_off, end_off;
	int idx_rec_off, data_rec_off, end_rec_off;
	__be32 cnid;

	tree = fd->tree;
	if (!fd->bnode) {
		if (!tree->root)
			hfs_btree_inc_height(tree);
		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
		if (IS_ERR(fd->bnode))
			return PTR_ERR(fd->bnode);
		fd->record = -1;
	}
	new_node = NULL;
	key_len = be16_to_cpu(fd->search_key->key_len) + 2;
again:
	/* new record idx and complete record size */
	rec = fd->record + 1;
	size = key_len + entry_len;

	node = fd->bnode;
	hfs_bnode_dump(node);
	/* get last offset */
	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
	end_off = hfs_bnode_read_u16(node, end_rec_off);
	end_rec_off -= 2;
	hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
		rec, size, end_off, end_rec_off);
	if (size > end_rec_off - end_off) {
		if (new_node)
			panic("not enough room!\n");
		new_node = hfs_bnode_split(fd);
		if (IS_ERR(new_node))
			return PTR_ERR(new_node);
		goto again;
	}
	if (node->type == HFS_NODE_LEAF) {
		tree->leaf_count++;
		mark_inode_dirty(tree->inode);
	}
	node->num_recs++;
	/* write new last offset */
	hfs_bnode_write_u16(node,
			    offsetof(struct hfs_bnode_desc, num_recs),
			    node->num_recs);
	hfs_bnode_write_u16(node, end_rec_off, end_off + size);
	data_off = end_off;
	data_rec_off = end_rec_off + 2;
	idx_rec_off = tree->node_size - (rec + 1) * 2;
	if (idx_rec_off == data_rec_off)
		goto skip;
	/* move all following entries */
	do {
		data_off = hfs_bnode_read_u16(node, data_rec_off + 2);
		hfs_bnode_write_u16(node, data_rec_off, data_off + size);
		data_rec_off += 2;
	} while (data_rec_off < idx_rec_off);

	/* move data away */
	hfs_bnode_move(node, data_off + size, data_off,
		       end_off - data_off);

skip:
	hfs_bnode_write(node, fd->search_key, data_off, key_len);
	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
	hfs_bnode_dump(node);

	if (new_node) {
		/* update parent key if we inserted a key
		 * at the start of the first node
		 */
		if (!rec && new_node != node)
			hfs_brec_update_parent(fd);

		hfs_bnode_put(fd->bnode);
		if (!new_node->parent) {
			hfs_btree_inc_height(tree);
			new_node->parent = tree->root;
		}
		fd->bnode = hfs_bnode_find(tree, new_node->parent);

		/* create index data entry */
		cnid = cpu_to_be32(new_node->this);
		entry = &cnid;
		entry_len = sizeof(cnid);

		/* get index key */
		hfs_bnode_read_key(new_node, fd->search_key, 14);
		__hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);

		hfs_bnode_put(new_node);
		new_node = NULL;

		if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
		    (tree->cnid == HFSPLUS_ATTR_CNID))
			key_len = be16_to_cpu(fd->search_key->key_len) + 2;
		else {
			fd->search_key->key_len =
				cpu_to_be16(tree->max_key_len);
			key_len = tree->max_key_len + 2;
		}
		goto again;
	}

	if (!rec)
		hfs_brec_update_parent(fd);

	return 0;
}
static int db8131m_power_on(void)
{
	struct regulator *regulator;
	int ret = 0;

	pr_debug("%s: in", __func__);

	db8131m_gpio_request();

	/* 5M_CAM_nSTBY(5M STBY) LOW */
	ret = gpio_request(GPIO_5M_CAM_nSTBY, "GPM0");
	if (ret) {
		pr_err("failed to request gpio(GPIO_5M_CAM_nSTBY)");
		return ret;
	}
	ret = gpio_direction_output(GPIO_5M_CAM_nSTBY, 0);
	CAM_CHECK_ERR_RET(ret, "low 5M_CAM_nSTBY");

	/* 5M_CAM_RESET(5M RESET) LOW */
	ret = gpio_request(GPIO_5M_CAM_RESET, "GPF1");
	if (ret) {
		pr_err("failed to request gpio(GPIO_5M_CAM_RESET)");
		return ret;
	}
	ret = gpio_direction_output(GPIO_5M_CAM_RESET, 0);
	CAM_CHECK_ERR_RET(ret, "low 5M_CAM_RESET");

	/* VT_CAM_1.8V(VDDIO) */
	regulator = regulator_get(NULL, "vt_cam_1.8v");
	if (IS_ERR(regulator))
		return -ENODEV;
	ret = regulator_enable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "enable vt_cam_1.8v");

	/* CAM_SENSOR_A2.8V */
	regulator = regulator_get(NULL, "cam_sensor_a2.8v");
	if (IS_ERR(regulator))
		return -ENODEV;
	ret = regulator_enable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "enable cam_sensor_a2.8v");

	/* CAM_DVDD_1.5V(1.3M Core 1.8V) */
	regulator = regulator_get(NULL, "cam_dvdd_1.5v");
	if (IS_ERR(regulator))
		return -ENODEV;
	ret = regulator_enable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "enable cam_dvdd_1.5v");

	/* CAM_ISP_CORE_1.2V ENABLE */
	regulator = regulator_get(NULL, "cam_isp_core_1.2v");
	if (IS_ERR(regulator))
		return -ENODEV;
	ret = regulator_enable(regulator);
	CAM_CHECK_ERR_RET(ret, "enable cam_isp_core_1.2v");
	mdelay(2);	/* 1ms */

	/* CAM_ISP_CORE_1.2V DISABLE */
	ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_isp_core_1.2v");

	/* VT_CAM_nSTBY(1.3M EN) EN */
	ret = gpio_direction_output(GPIO_VT_CAM_nSTBY, 1);
	CAM_CHECK_ERR_RET(ret, "high VT_CAM_nSTBY");

	/* MCLK */
	ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_SFN(2));
	CAM_CHECK_ERR_RET(ret, "cfg mclk");
	s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_NONE);
	mdelay(1);	/* 20us */

	/* CAM_VT_nRST(1.3M RESET) EN */
	ret = gpio_direction_output(GPIO_CAM_VT_nRST, 1);
	CAM_CHECK_ERR_RET(ret, "high CAM_VT_nRST");
	mdelay(5);	/* 70000 cycle */

	gpio_free(GPIO_5M_CAM_nSTBY);
	gpio_free(GPIO_5M_CAM_RESET);
	gpio_free(GPIO_VT_CAM_nSTBY);
	gpio_free(GPIO_CAM_VT_nRST);
	gpio_free(GPIO_VT_CAM_ID);

	return ret;
}
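/*
 * The power-on sequence above repeats one get/enable/put pattern per
 * supply. A hedged refactoring sketch, not from the original board file:
 * the helper name db8131m_enable_supply() is hypothetical, but the
 * regulator API calls are the same ones the function already uses,
 * e.g. db8131m_enable_supply("vt_cam_1.8v").
 */
static int db8131m_enable_supply(const char *name)
{
	struct regulator *regulator;
	int ret;

	regulator = regulator_get(NULL, name);	/* look up by supply name */
	if (IS_ERR(regulator))
		return -ENODEV;

	ret = regulator_enable(regulator);
	regulator_put(regulator);	/* drop the ref; the enable count persists */
	return ret;
}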
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr;
	struct vm_area_struct *vma;
	struct resource gic_res;
	int ret;

	down_write(&mm->mmap_sem);

	/*
	 * Determine total area size. This includes the VDSO data itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a page even though the total area is 64K, as we only need
	 * the counter registers at the start.
	 */
	gic_size = gic_present ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;

	base = get_unmapped_area(NULL, 0, size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;

	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		ret = gic_get_usm_range(&gic_res);
		if (ret)
			goto out;

		ret = io_remap_pfn_range(vma, base,
					 gic_res.start >> PAGE_SHIFT,
					 gic_size,
					 pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(&vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, PAGE_READONLY);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	up_write(&mm->mmap_sem);
	return ret;
}
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
	int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
	struct wm831x_dcdc *dcdc;
	struct resource *res;
	int ret, irq;

	dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);

	if (pdata == NULL || pdata->dcdc[id] == NULL)
		return -ENODEV;

	dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
	if (dcdc == NULL) {
		dev_err(&pdev->dev, "Unable to allocate private data\n");
		return -ENOMEM;
	}

	dcdc->wm831x = wm831x;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "No I/O resource\n");
		ret = -EINVAL;
		goto err;
	}
	dcdc->base = res->start;

	snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1);
	dcdc->desc.name = dcdc->name;
	dcdc->desc.id = id;
	dcdc->desc.type = REGULATOR_VOLTAGE;
	dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1;
	dcdc->desc.ops = &wm831x_buckv_ops;
	dcdc->desc.owner = THIS_MODULE;

	ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
	if (ret < 0) {
		dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret);
		goto err;
	}
	dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;

	/* The DVS selector lives in DVS_CONTROL, not in ON_CONFIG */
	ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
	if (ret < 0) {
		dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
		goto err;
	}
	dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;

	if (pdata->dcdc[id])
		wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);

	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
					     pdata->dcdc[id], dcdc);
	if (IS_ERR(dcdc->regulator)) {
		ret = PTR_ERR(dcdc->regulator);
		dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
			id + 1, ret);
		goto err;
	}

	irq = platform_get_irq_byname(pdev, "UV");
	ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
				 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
			irq, ret);
		goto err_regulator;
	}

	irq = platform_get_irq_byname(pdev, "HC");
	ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_oc_irq,
				 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n",
			irq, ret);
		goto err_uv;
	}

	platform_set_drvdata(pdev, dcdc);

	return 0;

err_uv:
	wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
err_regulator:
	regulator_unregister(dcdc->regulator);
err:
	if (dcdc->dvs_gpio)
		gpio_free(dcdc->dvs_gpio);
	kfree(dcdc);
	return ret;
}
static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
	int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
	struct wm831x_dcdc *dcdc;
	struct resource *res;
	int ret, irq;

	dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);

	if (pdata == NULL || pdata->dcdc[id] == NULL)
		return -ENODEV;

	dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
	if (dcdc == NULL) {
		dev_err(&pdev->dev, "Unable to allocate private data\n");
		return -ENOMEM;
	}

	dcdc->wm831x = wm831x;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "No I/O resource\n");
		ret = -EINVAL;
		goto err;
	}
	dcdc->base = res->start;

	snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1);
	dcdc->desc.name = dcdc->name;
	dcdc->desc.id = id;
	dcdc->desc.type = REGULATOR_VOLTAGE;
	dcdc->desc.ops = &wm831x_boostp_ops;
	dcdc->desc.owner = THIS_MODULE;

	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
					     pdata->dcdc[id], dcdc);
	if (IS_ERR(dcdc->regulator)) {
		ret = PTR_ERR(dcdc->regulator);
		dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
			id + 1, ret);
		goto err;
	}

	irq = platform_get_irq_byname(pdev, "UV");
	ret = wm831x_request_irq(wm831x, irq, wm831x_dcdc_uv_irq,
				 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n",
			irq, ret);
		goto err_regulator;
	}

	platform_set_drvdata(pdev, dcdc);

	return 0;

err_regulator:
	regulator_unregister(dcdc->regulator);
err:
	kfree(dcdc);
	return ret;
}
int ip6_route_me_harder(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
		.flowi6_mark = skb->mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};

	dst = ip6_route_output(net, skb->sk, &fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
		dst_release(dst);
		return -EINVAL;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
		if (IS_ERR(dst))
			return -1;
		skb_dst_set(skb, dst);
	}
#endif

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * Extra routing may be needed on local out, as the QUEUE target never
 * returns control to the table.
 */

struct ip6_rt_info {
	struct in6_addr daddr;
	struct in6_addr saddr;
	u_int32_t mark;
};

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		struct ipv6hdr *iph = ipv6_hdr(skb);
		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(skb);
	}
	return 0;
}

static int nf_ip6_route(struct net *net, struct dst_entry **dst,
			struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;

	*dst = ip6_route_output(net, sk, &fl->u.ip6);
	return (*dst)->error;
}

__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						skb->len - dataoff,
						protocol,
						csum_sub(0,
							 skb_checksum(skb, 0,
								      dataoff,
								      0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
				       unsigned int dataoff, unsigned int len,
				       u_int8_t protocol)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__wsum hsum;
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (len == skb->len - dataoff)
			return nf_ip6_checksum(skb, hook, dataoff, protocol);
		/* fall through */
	case CHECKSUM_NONE:
		hsum = skb_checksum(skb, 0, dataoff, 0);
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							 &ip6h->daddr,
							 skb->len - dataoff,
							 protocol,
							 csum_sub(0, hsum)));
		skb->ip_summed = CHECKSUM_NONE;
		return __skb_checksum_complete_head(skb, dataoff + len);
	}
	return csum;
};

static const struct nf_afinfo nf_ip6_afinfo = {
	.family			= AF_INET6,
	.checksum		= nf_ip6_checksum,
	.checksum_partial	= nf_ip6_checksum_partial,
	.route			= nf_ip6_route,
	.saveroute		= nf_ip6_saveroute,
	.reroute		= nf_ip6_reroute,
	.route_key_size		= sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
{
	return nf_register_afinfo(&nf_ip6_afinfo);
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	nf_unregister_afinfo(&nf_ip6_afinfo);
}
void mdp_config_vsync(struct msm_fb_data_type *mfd)
{
	/* vsync on primary lcd only for now */
	if ((mfd->dest != DISPLAY_LCD) ||
	    (mfd->panel_info.pdest != DISPLAY_1) || (!vsync_mode)) {
		goto err_handle;
	}

	vsync_clk_status = 0;
	if (mfd->panel_info.lcd.vsync_enable) {
		mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch +
		    mfd->panel_info.lcd.v_front_porch +
		    mfd->panel_info.lcd.v_pulse_width;
		mfd->total_lcd_lines =
		    mfd->panel_info.yres + mfd->total_porch_lines;
		mfd->lcd_ref_usec_time =
		    100000000 / mfd->panel_info.lcd.refx100;
		mfd->vsync_handler_pending = FALSE;
		mfd->last_vsync_timetick.tv64 = 0;

#ifdef MDP_HW_VSYNC
		if (mdp_vsync_clk == NULL)
			mdp_vsync_clk = clk_get(NULL, "mdp_vsync_clk");

		if (IS_ERR(mdp_vsync_clk)) {
			printk(KERN_ERR "error: can't get mdp_vsync_clk!\n");
			mfd->use_mdp_vsync = 0;
		} else
			mfd->use_mdp_vsync = 1;

		if (mfd->use_mdp_vsync) {
			uint32 vsync_cnt_cfg_dem;
			uint32 mdp_vsync_clk_speed_hz;

			mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk);

			if (mdp_vsync_clk_speed_hz == 0) {
				mfd->use_mdp_vsync = 0;
			} else {
				/*
				 * Do this calculation in 2 steps for
				 * rounding uint32 properly.
				 */
				vsync_cnt_cfg_dem =
				    (mfd->panel_info.lcd.refx100 *
				     mfd->total_lcd_lines) / 100;
				vsync_cnt_cfg =
				    (mdp_vsync_clk_speed_hz) /
				    vsync_cnt_cfg_dem;
				mdp_vsync_cfg_regs(mfd, TRUE);
			}
		}
#else
		mfd->use_mdp_vsync = 0;
		hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler;
		mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4);
#endif

#ifdef CONFIG_FB_MSM_MDDI
		mfd->channel_irq = 0;
		if (mfd->panel_info.lcd.hw_vsync_mode) {
			u32 vsync_gpio = mfd->vsync_gpio;
			u32 ret;

			if (vsync_gpio == -1) {
				MSM_FB_INFO("vsync_gpio not defined!\n");
				goto err_handle;
			}

			ret = gpio_tlmm_config(GPIO_CFG
					(vsync_gpio,
					 (mfd->use_mdp_vsync) ? 1 : 0,
					 GPIO_CFG_INPUT,
					 GPIO_CFG_PULL_DOWN,
					 GPIO_CFG_2MA),
					GPIO_CFG_ENABLE);
			if (ret)
				goto err_handle;

			/*
			 * if use_mdp_vsync, then no interrupt need since
			 * mdp_vsync is feed directly to mdp to reset the
			 * write pointer counter. therefore no irq_handler
			 * need to reset write pointer counter.
			 */
			if (!mfd->use_mdp_vsync) {
				mfd->channel_irq =
				    MSM_GPIO_TO_INT(vsync_gpio);
				if (request_irq
				    (mfd->channel_irq,
				     &mdp_hw_vsync_handler_proxy,
				     IRQF_TRIGGER_FALLING, "VSYNC_GPIO",
				     (void *)mfd)) {
					MSM_FB_INFO
					    ("irq=%d failed! vsync_gpio=%d\n",
					     mfd->channel_irq, vsync_gpio);
					goto err_handle;
				}
			}
		}
#endif
		mdp_hw_vsync_clk_enable(mfd);
		mdp_set_vsync((unsigned long)mfd);
	}

	return;

err_handle:
	if (mfd->vsync_width_boundary)
		vfree(mfd->vsync_width_boundary);
	mfd->panel_info.lcd.vsync_enable = FALSE;
	printk(KERN_ERR "%s: failed!\n", __func__);
}
static int s3c_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct rtc_time rtc_tm;
	struct resource *res;
	int ret;
	int tmp;

	dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);

	/* find the IRQs */
	s3c_rtc_tickno = platform_get_irq(pdev, 1);
	if (s3c_rtc_tickno < 0) {
		dev_err(&pdev->dev, "no irq for rtc tick\n");
		return s3c_rtc_tickno;
	}

	s3c_rtc_alarmno = platform_get_irq(pdev, 0);
	if (s3c_rtc_alarmno < 0) {
		dev_err(&pdev->dev, "no irq for alarm\n");
		return s3c_rtc_alarmno;
	}

	dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
		s3c_rtc_tickno, s3c_rtc_alarmno);

	/* get the memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(s3c_rtc_base))
		return PTR_ERR(s3c_rtc_base);

	rtc_clk = devm_clk_get(&pdev->dev, "gate_rtc");
	if (IS_ERR(rtc_clk)) {
		dev_err(&pdev->dev, "failed to find rtc clock source\n");
		ret = PTR_ERR(rtc_clk);
		rtc_clk = NULL;
		return ret;
	}

	clk_prepare_enable(rtc_clk);

	/* check to see if everything is setup correctly */
	s3c_rtc_enable(pdev, 1);

	dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
		readw(s3c_rtc_base + S3C2410_RTCCON));

	device_init_wakeup(&pdev->dev, 1);

	/* register RTC and exit */
	rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
				       THIS_MODULE);
	if (IS_ERR(rtc)) {
		dev_err(&pdev->dev, "cannot attach rtc\n");
		ret = PTR_ERR(rtc);
		goto err_nortc;
	}

	s3c_rtc_cpu_type = s3c_rtc_get_driver_data(pdev);

	/* Check RTC Time */
	s3c_rtc_gettime(NULL, &rtc_tm);

	if (rtc_valid_tm(&rtc_tm)) {
		rtc_tm.tm_year = 100;
		rtc_tm.tm_mon = 0;
		rtc_tm.tm_mday = 1;
		rtc_tm.tm_hour = 0;
		rtc_tm.tm_min = 0;
		rtc_tm.tm_sec = 0;

		s3c_rtc_settime(NULL, &rtc_tm);

		dev_warn(&pdev->dev,
			 "warning: invalid RTC value so initializing it\n");
	}

	if (s3c_rtc_cpu_type != TYPE_S3C2410)
		rtc->max_user_freq = 32768;
	else
		rtc->max_user_freq = 128;

	if (s3c_rtc_cpu_type == TYPE_S3C2416 ||
	    s3c_rtc_cpu_type == TYPE_S3C2443) {
		tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
		tmp |= S3C2443_RTCCON_TICSEL;
		writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
	}

	platform_set_drvdata(pdev, rtc);

	s3c_rtc_setfreq(&pdev->dev, 1);

	ret = devm_request_irq(&pdev->dev, s3c_rtc_alarmno, s3c_rtc_alarmirq,
			       0, "s3c2410-rtc alarm", rtc);
	if (ret) {
		dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
		goto err_alarm_irq;
	}

	ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq,
			       0, "s3c2410-rtc tick", rtc);
	if (ret) {
		dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
		goto err_alarm_irq;
	}

	clk_disable(rtc_clk);

	return 0;

err_alarm_irq:
	platform_set_drvdata(pdev, NULL);

err_nortc:
	s3c_rtc_enable(pdev, 0);
	clk_disable_unprepare(rtc_clk);

	return ret;
}
/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage. Use this call to set up the ports you will be
 * exporting through USB. Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->owner = THIS_MODULE;
	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
				| TTY_DRIVER_RESET_TERMIOS;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;	/* one stop bit */
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;				/* eight data bits */

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	gserial_wq = create_singlethread_workqueue("k_gserial");
	if (!gserial_wq) {
		status = -ENOMEM;
		goto fail;
	}

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n", __func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				   __func__, i, PTR_ERR(tty_dev));
	}

	for (i = 0; i < count; i++)
		usb_debugfs_init(ports[i].port, i);

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
		 count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--)
		kfree(ports[count].port);
	if (gserial_wq)
		destroy_workqueue(gserial_wq);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
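/*
 * A minimal usage sketch, not from u_serial.c itself: a composite
 * gadget bind callback reserving one port and tearing it down again.
 * The function names my_gadget_bind/my_gadget_unbind are hypothetical;
 * gserial_cleanup() is assumed to be the counterpart of gserial_setup()
 * that releases the ports and TTY driver.
 */
static int my_gadget_bind(struct usb_composite_dev *cdev)
{
	/* plan for a single /dev/ttyGS0 before any configuration binds */
	return gserial_setup(cdev->gadget, 1);
}

static int my_gadget_unbind(struct usb_composite_dev *cdev)
{
	gserial_cleanup();	/* release ports, TTY driver and workqueue */
	return 0;
}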
/* <BU5D08118 zhangtao 20100419 begin */
static int aps_12d_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	int ret;
	struct aps_data *aps;
	/* the aps_12d sensor is powered on */
	/* <BU5D07679 zhangtao 20100413 begin */
	struct vreg *vreg_gp4 = NULL;
	int rc;
	/* <DTS2010100800714 liugaofei 20101008 begin */
	int i;
	/* DTS2010100800714 liugaofei 20101008 end */

	vreg_gp4 = vreg_get(NULL, VREG_GP4_NAME);
	/* < DTS2010061200552 zhangtao 20100612 begin */
	if (IS_ERR(vreg_gp4)) {
		pr_err("%s:gp4 power init get failed\n", __func__);
	}
	/* DTS2010061200552 zhangtao 20100612 end> */

	/* <DTS2011012600839 liliang 20110215 begin */
	/* set gp4 voltage as 2700mV for all */
	rc = vreg_set_level(vreg_gp4, VREG_GP4_VOLTAGE_VALUE_2700);
	/* <DTS2011012600839 liliang 20110215 end> */
	if (rc) {
		PROXIMITY_DEBUG("%s: vreg_gp4 vreg_set_level failed\n",
				__func__);
		return rc;
	}
	rc = vreg_enable(vreg_gp4);
	if (rc) {
		pr_err("%s: vreg_gp4 vreg_enable failed\n", __func__);
		return rc;
	}
	mdelay(5);
	/* BU5D07679 zhangtao 20100413 end> */

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		PROXIMITY_DEBUG(KERN_ERR "aps_12d_probe: need I2C_FUNC_I2C\n");
		ret = -ENODEV;
		goto err_check_functionality_failed;
	}

	/* < DTS2010091001474 zhangtao 20100910 begin */
	/* if the board is T1 or T2, turn off the proximity sensor */
	/* < DTS2010092400487 lijianzhao 20100924 begin */
	/* This modification is for versions A&B of U8800 only */
	if ((machine_is_msm7x30_u8800()) &&
	    ((get_hw_sub_board_id() == HW_VER_SUB_VA) ||
	     ((get_hw_sub_board_id() == HW_VER_SUB_VB)))) {
		printk(KERN_ERR "aps_12d_probe: aps is not supported in U8800 and U8800 T1 board!\n");
		ret = -ENODEV;
		goto err_check_functionality_failed;
	}
	/* DTS2010092400487 lijianzhao 20100924 end> */
	/* DTS2010091001474 zhangtao 20100910 end > */

	aps = kzalloc(sizeof(*aps), GFP_KERNEL);
	if (aps == NULL) {
		ret = -ENOMEM;
		goto err_alloc_data_failed;
	}

	mutex_init(&aps->mlock);
	INIT_WORK(&aps->work, aps_12d_work_func);
	aps->client = client;
	i2c_set_clientdata(client, aps);

	PROXIMITY_DEBUG(KERN_INFO "ghj aps_12d_probe send command 2\n");
	/* Command 2 register: 25mA,DC,12bit,Range1 */
	/* < DTS2010081803338 zhangtao 20100818 begin */
	/* making the range smaller makes the IR change bigger */
	ret = aps_i2c_reg_write(aps, APS_12D_REG_CMD2,
	/* < DTS2010102103994 zhangtao 20101112 begin */
				(uint8_t)(APS_12D_IRDR_SEL_50MA << 6 |
					  APS_12D_FREQ_SEL_DC << 4 |
					  APS_12D_RES_SEL_12 << 2 |
					  APS_12D_RANGE_SEL_ALS_1000));
	/* DTS2010102103994 zhangtao 20101112 end > */
	/* DTS2010081803338 zhangtao 20100818 end > */
	if (ret < 0)
		goto err_detect_failed;

	/* <DTS2010100800714 liugaofei 20101008 begin */
	range_index = 0;
	for (i = 0; i < TOTAL_RANGE_NUM; i++) {
		/* NOTE: do NOT use the last one */
		up_range_value[i] = MAX_ADC_OUTPUT -
				    high_threshold_value[i] - RANGE_FIX;
	}
	down_range_value[0] = 0;
	for (i = 1; i < TOTAL_RANGE_NUM; i++) {
		/* NOTE: do not use the first one */
		down_range_value[i] = (MAX_ADC_OUTPUT -
				       high_threshold_value[i - 1] -
				       (MAX_ADC_OUTPUT / ADJUST_GATE)) / 4;
	}
	/* DTS2010100800714 liugaofei 20101008 end */

	/* < DTS2011042705601 zhangtao 20110427 begin */
	/* we don't use the input device sensors again */
	aps->input_dev = input_allocate_device();
	if (aps->input_dev == NULL) {
		ret = -ENOMEM;
		PROXIMITY_DEBUG(KERN_ERR "aps_12d_probe: Failed to allocate input device\n");
		goto err_input_dev_alloc_failed;
	}
	aps->input_dev->name = "sensors_aps";
	aps->input_dev->id.bustype = BUS_I2C;
	input_set_drvdata(aps->input_dev, aps);

	ret = input_register_device(aps->input_dev);
	if (ret) {
		printk(KERN_ERR "aps_probe: Unable to register %s input device\n",
		       aps->input_dev->name);
		goto err_input_register_device_failed;
	}
	/* DTS2011042705601 zhangtao 20110427 end > */

	set_bit(EV_ABS, aps->input_dev->evbit);
	input_set_abs_params(aps->input_dev, ABS_LIGHT, 0, 10240, 0, 0);
	input_set_abs_params(aps->input_dev, ABS_DISTANCE, 0, 1, 0, 0);

	ret = misc_register(&light_device);
	if (ret) {
		printk(KERN_ERR "aps_12d_probe: light_device register failed\n");
		goto err_light_misc_device_register_failed;
	}

	ret = misc_register(&proximity_device);
	if (ret) {
		printk(KERN_ERR "aps_12d_probe: proximity_device register failed\n");
		goto err_proximity_misc_device_register_failed;
	}

	/* < DTS2010090300997 zhangtao 20100903 begin */
	if (light_device.minor != MISC_DYNAMIC_MINOR)
		light_device_minor = light_device.minor;

	if (proximity_device.minor != MISC_DYNAMIC_MINOR)
		proximity_device_minor = proximity_device.minor;

	wake_lock_init(&proximity_wake_lock, WAKE_LOCK_SUSPEND, "proximity");
	/* DTS2010090300997 zhangtao 20100903 end > */

	hrtimer_init(&aps->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aps->timer.function = aps_timer_func;

	aps_wq = create_singlethread_workqueue("aps_wq");
	if (!aps_wq) {
		ret = -ENOMEM;
		goto err_create_workqueue_failed;
	}

	this_aps_data = aps;

	/* <DTS2011032104626 shenjinming 20110321 begin */
#ifdef CONFIG_HUAWEI_HW_DEV_DCT
	/* detected the device successfully, set the flag as present */
	set_hw_dev_flag(DEV_I2C_APS);
#endif
	/* <DTS2011032104626 shenjinming 20110321 end> */
	/* < DTS2011052606009 jiaxianghong 20110527 end */

	printk(KERN_INFO "aps_12d_probe: Start Proximity Sensor APS-12D\n");
	/* DTS2010072801000 zhangtao 20100728 end > */

	return 0;

err_create_workqueue_failed:
	misc_deregister(&proximity_device);
err_proximity_misc_device_register_failed:
	misc_deregister(&light_device);
err_light_misc_device_register_failed:
err_input_register_device_failed:
	input_free_device(aps->input_dev);
err_input_dev_alloc_failed:
err_detect_failed:
	kfree(aps);
err_alloc_data_failed:
err_check_functionality_failed:
	/* < DTS2010061200552 zhangtao 20100612 begin */
	if (NULL != vreg_gp4) {
		/* < DTS2011052101089 shenjinming 20110521 begin */
		/* don't reuse ret here: it would change the return
		 * value of the probe function */
		vreg_disable(vreg_gp4);
		/* DTS2011052101089 shenjinming 20110521 end > */
	}
	/* DTS2010061200552 zhangtao 20100612 end > */

	return ret;
}
int msm_cam_core_reset(void)
{
	struct clk *clk1;
	int rc = 0;

	clk1 = clk_get(NULL, "csi_vfe_clk");
	if (IS_ERR(clk1)) {
		pr_err("%s: did not get csi_vfe_clk\n", __func__);
		return PTR_ERR(clk1);
	}

	rc = clk_reset(clk1, CLK_RESET_ASSERT);
	if (rc) {
		pr_err("%s:csi_vfe_clk assert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	usleep_range(1000, 1200);
	rc = clk_reset(clk1, CLK_RESET_DEASSERT);
	if (rc) {
		pr_err("%s:csi_vfe_clk deassert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	clk_put(clk1);

	clk1 = clk_get(NULL, "csi_clk");
	if (IS_ERR(clk1)) {
		pr_err("%s: did not get csi_clk\n", __func__);
		return PTR_ERR(clk1);
	}

	rc = clk_reset(clk1, CLK_RESET_ASSERT);
	if (rc) {
		pr_err("%s:csi_clk assert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	usleep_range(1000, 1200);
	rc = clk_reset(clk1, CLK_RESET_DEASSERT);
	if (rc) {
		pr_err("%s:csi_clk deassert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	clk_put(clk1);

	clk1 = clk_get(NULL, "csi_pclk");
	if (IS_ERR(clk1)) {
		pr_err("%s: did not get csi_pclk\n", __func__);
		return PTR_ERR(clk1);
	}

	rc = clk_reset(clk1, CLK_RESET_ASSERT);
	if (rc) {
		pr_err("%s:csi_pclk assert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	usleep_range(1000, 1200);
	rc = clk_reset(clk1, CLK_RESET_DEASSERT);
	if (rc) {
		pr_err("%s:csi_pclk deassert failed\n", __func__);
		clk_put(clk1);
		return rc;
	}
	clk_put(clk1);

	return rc;
}
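/*
 * msm_cam_core_reset() runs an identical assert/delay/deassert cycle
 * for three clocks. A hedged refactoring sketch under the same clk API;
 * the helper name msm_cam_clk_reset_cycle() is hypothetical. The body
 * above could then reduce to three calls, e.g.
 * msm_cam_clk_reset_cycle("csi_vfe_clk").
 */
static int msm_cam_clk_reset_cycle(const char *name)
{
	struct clk *clk;
	int rc;

	clk = clk_get(NULL, name);
	if (IS_ERR(clk)) {
		pr_err("%s: did not get %s\n", __func__, name);
		return PTR_ERR(clk);
	}

	rc = clk_reset(clk, CLK_RESET_ASSERT);
	if (!rc) {
		usleep_range(1000, 1200);	/* hold the block in reset */
		rc = clk_reset(clk, CLK_RESET_DEASSERT);
	}
	if (rc)
		pr_err("%s: reset cycle for %s failed\n", __func__, name);

	clk_put(clk);
	return rc;
}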
static int lcdc_probe(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	struct platform_device *mdp_dev = NULL;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;

	if (pdev->id == 0) {
		lcdc_pdata = pdev->dev.platform_data;
		return 0;
	}

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	mdp_dev = platform_device_alloc("mdp", pdev->id);
	if (!mdp_dev)
		return -ENOMEM;

	/*
	 * link to the latest pdev
	 */
	mfd->pdev = mdp_dev;
	mfd->dest = DISPLAY_LCDC;

	/*
	 * alloc panel device data
	 */
	if (platform_device_add_data(mdp_dev, pdev->dev.platform_data,
				     sizeof(struct msm_fb_panel_data))) {
		printk(KERN_ERR "lcdc_probe: platform_device_add_data failed!\n");
		platform_device_put(mdp_dev);
		return -ENOMEM;
	}

	/*
	 * data chain
	 */
	pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data;
	pdata->on = lcdc_on;
	pdata->off = lcdc_off;
	pdata->next = pdev;

	/*
	 * get/set panel specific fb info
	 */
	mfd->panel_info = pdata->panel_info;

	if (mfd->index == 0)
		mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
	else
		mfd->fb_imgType = MDP_RGB_565;

	fbi = mfd->fbi;
	fbi->var.pixclock = clk_round_rate(pixel_mdp_clk,
					   mfd->panel_info.clk_rate);
	fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
	fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
	fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
	fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
	fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
	fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;

#ifndef CONFIG_MSM_BUS_SCALING
	mfd->ebi1_clk = clk_get(NULL, "ebi1_lcdc_clk");
	if (IS_ERR(mfd->ebi1_clk))
		return PTR_ERR(mfd->ebi1_clk);
#endif

	/*
	 * set driver data
	 */
	platform_set_drvdata(mdp_dev, mfd);

	/*
	 * register in mdp driver
	 */
	rc = platform_device_add(mdp_dev);
	if (rc)
		goto lcdc_probe_err;

	pdev_list[pdev_list_cnt++] = pdev;
	return 0;

lcdc_probe_err:
	platform_device_put(mdp_dev);
	return rc;
}
static int s5k4ecgx_power_down(void)
{
	struct regulator *regulator;
	int ret = 0;

	pr_debug("%s: in", __func__);

	s5k4ecgx_gpio_request();

	/* VT_CAM_nSTBY(1.3M EN) LOW */
	ret = gpio_request(GPIO_VT_CAM_nSTBY, "GPM0");
	if (ret) {
		pr_err("failed to request gpio(GPIO_VT_CAM_nSTBY)");
		return ret;
	}
	ret = gpio_direction_output(GPIO_VT_CAM_nSTBY, 0);
	CAM_CHECK_ERR_RET(ret, "low VT_CAM_nSTBY");

	/* CAM_VT_nRST(1.3M RESET) LOW */
	ret = gpio_request(GPIO_CAM_VT_nRST, "GPM1");
	if (ret) {
		pr_err("failed to request gpio(GPIO_CAM_VT_nRST)");
		return ret;
	}
	ret = gpio_direction_output(GPIO_CAM_VT_nRST, 0);
	CAM_CHECK_ERR_RET(ret, "low CAM_VT_nRST");

	/* 5M_CAM_RESET(5M RESET) LOW */
	ret = gpio_direction_output(GPIO_5M_CAM_RESET, 0);
	CAM_CHECK_ERR_RET(ret, "low 5M_CAM_RESET");
	mdelay(1);	/* 50us */

	/* MCLK */
	ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_INPUT);
	s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_DOWN);
	CAM_CHECK_ERR(ret, "cfg mclk");

	/* 5M_CAM_nSTBY(5M STBY) LOW */
	ret = gpio_direction_output(GPIO_5M_CAM_nSTBY, 0);
	CAM_CHECK_ERR_RET(ret, "low 5M_CAM_nSTBY");

	/* CAM_AF_2.8V */
	regulator = regulator_get(NULL, "cam_af_2.8v");
	if (IS_ERR(regulator))
		return -ENODEV;
	if (regulator_is_enabled(regulator))
		ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_af_2.8v");

	/* CAM_DVDD_1.5V(1.3M Core 1.8V) */
	regulator = regulator_get(NULL, "cam_dvdd_1.5v");
	if (IS_ERR(regulator))
		return -ENODEV;
	if (regulator_is_enabled(regulator))
		ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_dvdd_1.5v");

	/* CAM_SENSOR_IO_1.8V */
	regulator = regulator_get(NULL, "cam_sensor_io_1.8v");
	if (IS_ERR(regulator))
		return -ENODEV;
	if (regulator_is_enabled(regulator))
		ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_sensor_io_1.8v");

	/* CAM_SENSOR_A2.8V */
	regulator = regulator_get(NULL, "cam_sensor_a2.8v");
	if (IS_ERR(regulator))
		return -ENODEV;
	if (regulator_is_enabled(regulator))
		ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_sensor_a2.8v");

	/* CAM_ISP_CORE_1.2V */
	regulator = regulator_get(NULL, "cam_isp_core_1.2v");
	if (IS_ERR(regulator))
		return -ENODEV;
	if (regulator_is_enabled(regulator))
		ret = regulator_force_disable(regulator);
	regulator_put(regulator);
	CAM_CHECK_ERR_RET(ret, "disable cam_isp_core_1.2v");

	gpio_free(GPIO_VT_CAM_nSTBY);
	gpio_free(GPIO_CAM_VT_nRST);
	gpio_free(GPIO_5M_CAM_RESET);
	gpio_free(GPIO_5M_CAM_nSTBY);

	return ret;
}
/**
 * seq_read -	->read() method for sequential files.
 * @file: the file to read from
 * @buf: the buffer to read to
 * @size: the maximum number of bytes to read
 * @ppos: the current position in the file
 *
 * Ready-made ->f_op->read()
 */
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	size_t copied = 0;
	loff_t pos;
	size_t n;
	void *p;
	int err = 0;

	mutex_lock(&m->lock);

	/*
	 * seq_file->op->..m_start/m_stop/m_next may do special actions
	 * or optimisations based on the file->f_version, so we want to
	 * pass the file->f_version to those methods.
	 *
	 * seq_file->version is just copy of f_version, and seq_file
	 * methods can treat it simply as file version.
	 * It is copied in first and copied out after all operations.
	 * It is convenient to have it as part of structure to avoid the
	 * need of passing another argument to all the seq_file methods.
	 */
	m->version = file->f_version;

	/* Don't assume *ppos is where we left it */
	if (unlikely(*ppos != m->read_pos)) {
		while ((err = traverse(m, *ppos)) == -EAGAIN)
			;
		if (err) {
			/* With prejudice... */
			m->read_pos = 0;
			m->version = 0;
			m->index = 0;
			m->count = 0;
			goto Done;
		} else {
			m->read_pos = *ppos;
		}
	}

	/* grab buffer if we didn't have one */
	if (!m->buf) {
		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
		if (!m->buf)
			goto Enomem;
	}
	/* if not empty - flush it first */
	if (m->count) {
		n = min(m->count, size);
		err = copy_to_user(buf, m->buf + m->from, n);
		if (err)
			goto Efault;
		m->count -= n;
		m->from += n;
		size -= n;
		buf += n;
		copied += n;
		if (!m->count)
			m->index++;
		if (!size)
			goto Done;
	}
	/* we need at least one record in buffer */
	pos = m->index;
	p = m->op->start(m, &pos);
	while (1) {
		err = PTR_ERR(p);
		if (!p || IS_ERR(p))
			break;
		err = m->op->show(m, p);
		if (err < 0)
			break;
		if (unlikely(err))
			m->count = 0;
		if (unlikely(!m->count)) {
			p = m->op->next(m, p, &pos);
			m->index = pos;
			continue;
		}
		if (m->count < m->size)
			goto Fill;
		m->op->stop(m, p);
		kvfree(m->buf);
		m->buf = seq_buf_alloc(m->size <<= 1);
		if (!m->buf)
			goto Enomem;
		m->count = 0;
		m->version = 0;
		pos = m->index;
		p = m->op->start(m, &pos);
	}
	m->op->stop(m, p);
	m->count = 0;
	goto Done;
Fill:
	/* they want more? let's try to get some more */
	while (m->count < size) {
		size_t offs = m->count;
		loff_t next = pos;
		p = m->op->next(m, p, &next);
		if (!p || IS_ERR(p)) {
			err = PTR_ERR(p);
			break;
		}
		err = m->op->show(m, p);
		if (seq_overflow(m) || err) {
			m->count = offs;
			if (likely(err <= 0))
				break;
		}
		pos = next;
	}
	m->op->stop(m, p);
	n = min(m->count, size);
	err = copy_to_user(buf, m->buf, n);
	if (err)
		goto Efault;
	copied += n;
	m->count -= n;
	if (m->count)
		m->from = n;
	else
		pos++;
	m->index = pos;
Done:
	if (!copied)
		copied = err;
	else {
		*ppos += copied;
		m->read_pos += copied;
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return copied;
Enomem:
	err = -ENOMEM;
	goto Done;
Efault:
	err = -EFAULT;
	goto Done;
}
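/*
 * A self-contained usage sketch for the seq_file iterator interface
 * that seq_read() drives; it is not part of seq_file.c. The fixed
 * demo_items array stands in for any real record source. All seq_*
 * calls are the standard <linux/seq_file.h> API; seq_read() above is
 * plugged in directly as the ->read() method.
 */
static const char *const demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* return a cursor for record *pos, or NULL past the end */
	return *pos < ARRAY_SIZE(demo_items) ?
		(void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to unlock or release in this sketch */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char *const *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,	/* the ready-made ->read() above */
	.llseek		= seq_lseek,
	.release	= seq_release,
};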
void __init midas_camera_init(void)
{
#ifdef CONFIG_VIDEO_FIMC
	s3c_fimc0_set_platdata(&fimc_plat);
	s3c_fimc1_set_platdata(&fimc_plat);
	s3c_fimc2_set_platdata(NULL);
	s3c_fimc3_set_platdata(NULL);
#ifdef CONFIG_EXYNOS_DEV_PD
	s3c_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
	s3c_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
	s3c_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
	s3c_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#ifdef CONFIG_VIDEO_FIMC_MIPI
	s3c_csis0_set_platdata(NULL);
	s3c_csis1_set_platdata(NULL);
#ifdef CONFIG_EXYNOS_DEV_PD
	s3c_device_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
	s3c_device_csis1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#endif
#endif /* CONFIG_VIDEO_FIMC */

	camera_class = class_create(THIS_MODULE, "camera");

	if (!s5k4ecgx_dev) {
		s5k4ecgx_dev = device_create(camera_class, NULL, 0, NULL,
					     "rear");
		if (IS_ERR(s5k4ecgx_dev)) {
			pr_err("s5k4ecgx_dev : failed to create device!\n");
		} else {
			if (device_create_file(s5k4ecgx_dev,
					       &dev_attr_rear_flash) < 0) {
				pr_err("failed to create device file, %s\n",
				       dev_attr_rear_flash.attr.name);
			}
			if (device_create_file(s5k4ecgx_dev,
					       &dev_attr_rear_camtype) < 0) {
				pr_err("failed to create device file, %s\n",
				       dev_attr_rear_camtype.attr.name);
			}
			if (device_create_file(s5k4ecgx_dev,
					       &dev_attr_rear_camfw) < 0) {
				pr_err("failed to create device file, %s\n",
				       dev_attr_rear_camfw.attr.name);
			}
		}
	}

	if (!db8131m_dev) {
		db8131m_dev = device_create(camera_class, NULL, 0, NULL,
					    "front");
		if (IS_ERR(db8131m_dev)) {
			pr_err("db8131m_dev : failed to create device!\n");
		} else {
			if (device_create_file(db8131m_dev,
					       &dev_attr_front_camtype) < 0) {
				pr_err("failed to create device file, %s\n",
				       dev_attr_front_camtype.attr.name);
			}
			if (device_create_file(db8131m_dev,
					       &dev_attr_front_camfw) < 0) {
				pr_err("failed to create device file, %s\n",
				       dev_attr_front_camfw.attr.name);
			}
		}
	}
}
static int mipi_dsi_panel_power(int on)
{
	static struct regulator *v_lcm, *v_lcmio, *v_dsivdd;
	static bool bPanelPowerOn = false;
	int rc;

	char *lcm_str = "8921_l11";
	char *lcmio_str = "8921_lvs5";
	char *dsivdd_str = "8921_l2";

	if (panel_type == PANEL_ID_NONE)
		return -ENODEV;

	PR_DISP_INFO("%s: state : %d\n", __func__, on);

	if (!dsi_power_on) {
		v_lcm = regulator_get(&msm_mipi_dsi1_device.dev, lcm_str);
		if (IS_ERR(v_lcm)) {
			PR_DISP_ERR("could not get %s, rc = %ld\n",
				    lcm_str, PTR_ERR(v_lcm));
			return -ENODEV;
		}

		v_lcmio = regulator_get(&msm_mipi_dsi1_device.dev, lcmio_str);
		if (IS_ERR(v_lcmio)) {
			PR_DISP_ERR("could not get %s, rc = %ld\n",
				    lcmio_str, PTR_ERR(v_lcmio));
			return -ENODEV;
		}

		v_dsivdd = regulator_get(&msm_mipi_dsi1_device.dev,
					 dsivdd_str);
		if (IS_ERR(v_dsivdd)) {
			PR_DISP_ERR("could not get %s, rc = %ld\n",
				    dsivdd_str, PTR_ERR(v_dsivdd));
			return -ENODEV;
		}

		rc = regulator_set_voltage(v_lcm, 3000000, 3000000);
		if (rc) {
			PR_DISP_ERR("%s#%d: set_voltage %s failed, rc=%d\n",
				    __func__, __LINE__, lcm_str, rc);
			return -EINVAL;
		}

		rc = regulator_set_voltage(v_dsivdd, 1200000, 1200000);
		if (rc) {
			PR_DISP_ERR("%s#%d: set_voltage %s failed, rc=%d\n",
				    __func__, __LINE__, dsivdd_str, rc);
			return -EINVAL;
		}

		rc = gpio_request(VILLE_GPIO_LCD_RSTz, "LCM_RST_N");
		if (rc) {
			PR_DISP_ERR("%s:LCM gpio %d request failed, rc=%d\n",
				    __func__, VILLE_GPIO_LCD_RSTz, rc);
			return -EINVAL;
		}

		dsi_power_on = true;
	}

	if (on) {
		PR_DISP_INFO("%s: on\n", __func__);
		rc = regulator_set_optimum_mode(v_lcm, 100000);
		if (rc < 0) {
			PR_DISP_ERR("set_optimum_mode %s failed, rc=%d\n",
				    lcm_str, rc);
			return -EINVAL;
		}
		rc = regulator_set_optimum_mode(v_dsivdd, 100000);
		if (rc < 0) {
			PR_DISP_ERR("set_optimum_mode %s failed, rc=%d\n",
				    dsivdd_str, rc);
			return -EINVAL;
		}

		rc = regulator_enable(v_dsivdd);
		if (rc) {
			PR_DISP_ERR("enable regulator %s failed, rc=%d\n",
				    dsivdd_str, rc);
			return -ENODEV;
		}

		hr_msleep(1);

		rc = regulator_enable(v_lcmio);
		if (rc) {
			PR_DISP_ERR("enable regulator %s failed, rc=%d\n",
				    lcmio_str, rc);
			return -ENODEV;
		}

		rc = regulator_enable(v_lcm);
		if (rc) {
			PR_DISP_ERR("enable regulator %s failed, rc=%d\n",
				    lcm_str, rc);
			return -ENODEV;
		}

		if (!mipi_lcd_on) {
			hr_msleep(10);
			gpio_set_value(VILLE_GPIO_LCD_RSTz, 1);
			hr_msleep(1);
			gpio_set_value(VILLE_GPIO_LCD_RSTz, 0);
			hr_msleep(35);
			gpio_set_value(VILLE_GPIO_LCD_RSTz, 1);
		}
		hr_msleep(60);

		bPanelPowerOn = true;
	} else {
		PR_DISP_INFO("%s: off\n", __func__);
		if (!bPanelPowerOn)
			return 0;

		hr_msleep(100);
		gpio_set_value(VILLE_GPIO_LCD_RSTz, 0);
		hr_msleep(10);

		if (regulator_disable(v_dsivdd)) {
			PR_DISP_ERR("%s: Unable to disable the regulator: %s\n",
				    __func__, dsivdd_str);
			return -EINVAL;
		}

		if (regulator_disable(v_lcm)) {
			PR_DISP_ERR("%s: Unable to disable the regulator: %s\n",
				    __func__, lcm_str);
			return -EINVAL;
		}

		hr_msleep(5);

		if (regulator_disable(v_lcmio)) {
			PR_DISP_ERR("%s: Unable to disable the regulator: %s\n",
				    __func__, lcmio_str);
			return -EINVAL;
		}

		rc = regulator_set_optimum_mode(v_dsivdd, 100);
		if (rc < 0) {
			PR_DISP_ERR("%s: Unable to set optimum mode of the regulator: %s\n",
				    __func__, dsivdd_str);
			return -EINVAL;
		}

		bPanelPowerOn = false;
	}
	return 0;
}
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;
	int frag_max_size;

	frag_max_size = IPCB(skb)->frag_max_size;
	BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr,
					  iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for
			 * 2 types of martian destinations: loopback destinations
			 * and destination 0.0.0.0. In both cases the packet
			 * will be dropped because the destination is the
			 * loopback device and not the bridge.
			 */
			if (err != -EHOSTUNREACH || !in_dev ||
			    IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct wil6210_priv *wil;
	struct device *dev = &pdev->dev;
	void __iomem *csr;
	int rc;

	/* check HW */
	dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
		dev_err(&pdev->dev, "Not " WIL_NAME "? "
			"BAR0 size is %lu while expecting %lu\n",
			(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
		return -ENODEV;
	}

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		return -ENODEV;
	}
	/* rollback to err_disable_pdev */

	rc = pci_request_region(pdev, 0, WIL_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_region failed\n");
		goto err_disable_pdev;
	}
	/* rollback to err_release_reg */

	csr = pci_ioremap_bar(pdev, 0);
	if (!csr) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		rc = -ENODEV;
		goto err_release_reg;
	}
	/* rollback to err_iounmap */
	dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);

	wil = wil_if_alloc(dev, csr);
	if (IS_ERR(wil)) {
		rc = (int)PTR_ERR(wil);
		dev_err(dev, "wil_if_alloc failed: %d\n", rc);
		goto err_iounmap;
	}
	/* rollback to if_free */

	pci_set_drvdata(pdev, wil);
	wil->pdev = pdev;

	wil6210_clear_irq(wil);
	/* FW should raise IRQ when ready */
	rc = wil_if_pcie_enable(wil);
	if (rc) {
		wil_err(wil, "Enable device failed\n");
		goto if_free;
	}
	/* rollback to bus_disable */

	rc = wil_if_add(wil);
	if (rc) {
		wil_err(wil, "wil_if_add failed: %d\n", rc);
		goto bus_disable;
	}

	wil6210_debugfs_init(wil);

	/* check FW is alive */
	wmi_echo(wil);

	return 0;

bus_disable:
	wil_if_pcie_disable(wil);
if_free:
	wil_if_free(wil);
err_iounmap:
	pci_iounmap(pdev, csr);
err_release_reg:
	pci_release_region(pdev, 0);
err_disable_pdev:
	pci_disable_device(pdev);

	return rc;
}
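/*
 * A hedged sketch of the matching ->remove() callback, built only from
 * the rollback calls visible in the probe above. It assumes the CSR
 * mapping is reachable from the private structure (shown here as a
 * hypothetical wil->csr field) and that wil_if_remove() is the
 * counterpart of wil_if_add().
 */
static void wil_pcie_remove(struct pci_dev *pdev)
{
	struct wil6210_priv *wil = pci_get_drvdata(pdev);

	wil_if_remove(wil);		/* assumed counterpart of wil_if_add() */
	wil_if_pcie_disable(wil);
	pci_iounmap(pdev, wil->csr);	/* wil->csr: assumed home of the mapping */
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	wil_if_free(wil);
}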
static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
{
	struct hfs_btree *tree;
	struct hfs_bnode *node, *new_node, *next_node;
	struct hfs_bnode_desc node_desc;
	int num_recs, new_rec_off, new_off, old_rec_off;
	int data_start, data_end, size;

	tree = fd->tree;
	node = fd->bnode;
	new_node = hfs_bmap_alloc(tree);
	if (IS_ERR(new_node))
		return new_node;
	hfs_bnode_get(node);
	hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n",
		node->this, new_node->this, node->next);
	new_node->next = node->next;
	new_node->prev = node->this;
	new_node->parent = node->parent;
	new_node->type = node->type;
	new_node->height = node->height;

	if (node->next)
		next_node = hfs_bnode_find(tree, node->next);
	else
		next_node = NULL;

	if (IS_ERR(next_node)) {
		hfs_bnode_put(node);
		hfs_bnode_put(new_node);
		return next_node;
	}

	size = tree->node_size / 2 - node->num_recs * 2 - 14;
	old_rec_off = tree->node_size - 4;
	num_recs = 1;
	for (;;) {
		data_start = hfs_bnode_read_u16(node, old_rec_off);
		if (data_start > size)
			break;
		old_rec_off -= 2;
		if (++num_recs < node->num_recs)
			continue;
		/* panic? */
		hfs_bnode_put(node);
		hfs_bnode_put(new_node);
		if (next_node)
			hfs_bnode_put(next_node);
		return ERR_PTR(-ENOSPC);
	}

	if (fd->record + 1 < num_recs) {
		/* new record is in the lower half,
		 * so leave some more space there
		 */
		old_rec_off += 2;
		num_recs--;
		data_start = hfs_bnode_read_u16(node, old_rec_off);
	} else {
		hfs_bnode_put(node);
		hfs_bnode_get(new_node);
		fd->bnode = new_node;
		fd->record -= num_recs;
		fd->keyoffset -= data_start - 14;
		fd->entryoffset -= data_start - 14;
	}
	new_node->num_recs = node->num_recs - num_recs;
	node->num_recs = num_recs;

	new_rec_off = tree->node_size - 2;
	new_off = 14;
	size = data_start - new_off;
	num_recs = new_node->num_recs;
	data_end = data_start;
	while (num_recs) {
		hfs_bnode_write_u16(new_node, new_rec_off, new_off);
		old_rec_off -= 2;
		new_rec_off -= 2;
		data_end = hfs_bnode_read_u16(node, old_rec_off);
		new_off = data_end - size;
		num_recs--;
	}
	hfs_bnode_write_u16(new_node, new_rec_off, new_off);
	hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start);

	/* update new bnode header */
	node_desc.next = cpu_to_be32(new_node->next);
	node_desc.prev = cpu_to_be32(new_node->prev);
	node_desc.type = new_node->type;
	node_desc.height = new_node->height;
	node_desc.num_recs = cpu_to_be16(new_node->num_recs);
	node_desc.reserved = 0;
	hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc));

	/* update previous bnode header */
	node->next = new_node->this;
	hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc));
	node_desc.next = cpu_to_be32(node->next);
	node_desc.num_recs = cpu_to_be16(node->num_recs);
	hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));

	/* update next bnode header */
	if (next_node) {
		next_node->prev = new_node->this;
		hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
		node_desc.prev = cpu_to_be32(next_node->prev);
		hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc));
		hfs_bnode_put(next_node);
	} else if (node->this == tree->leaf_tail) {
		/* if there is no next node, this might be the new tail */
		tree->leaf_tail = new_node->this;
		mark_inode_dirty(tree->inode);
	}

	hfs_bnode_dump(node);
	hfs_bnode_dump(new_node);
	hfs_bnode_put(node);

	return new_node;
}
int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
{
	struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
	struct pohmelfs_mcache *m;
	int err = -ENOMEM;
	struct iattr iattr;
	struct inode *inode = &pi->vfs_inode;

	dprintk("%s: %p: ino: %llu, start: %llu, size: %u, "
			"type: %d, locked as: %d, owned: %d.\n",
			__func__, &pi->vfs_inode, pi->ino,
			start, size, type, pi->lock_type,
			!!test_bit(NETFS_INODE_OWNED, &pi->state));

	if (!pohmelfs_need_lock(pi, type))
		return 0;

	m = pohmelfs_mcache_alloc(psb, start, size, NULL);
	if (IS_ERR(m))
		return PTR_ERR(m);

	err = pohmelfs_send_lock_trans(pi, m->gen, start, size,
			type | POHMELFS_LOCK_GRAB);
	if (err)
		goto err_out_put;

	/*
	 * wait_for_completion_timeout() returns the remaining jiffies on
	 * completion and 0 on timeout, so a nonzero return means the reply
	 * arrived and m->err carries its status.
	 */
	err = wait_for_completion_timeout(&m->complete, psb->mcache_timeout);
	if (err)
		err = m->err;
	else
		err = -ETIMEDOUT;

	if (err) {
		printk("%s: %p: ino: %llu, mgen: %llu, start: %llu, size: %u, err: %d.\n",
			__func__, &pi->vfs_inode, pi->ino, m->gen,
			start, size, err);
	}

	if (err && (err != -ENOENT))
		goto err_out_put;

	if (!err) {
		netfs_convert_inode_info(&m->info);

		iattr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID |
				 ATTR_SIZE | ATTR_ATIME;
		iattr.ia_mode = m->info.mode;
		iattr.ia_uid = m->info.uid;
		iattr.ia_gid = m->info.gid;
		iattr.ia_size = m->info.size;
		iattr.ia_atime = CURRENT_TIME;

		dprintk("%s: %p: ino: %llu, mgen: %llu, start: %llu, isize: %llu -> %llu.\n",
			__func__, &pi->vfs_inode, pi->ino, m->gen,
			start, inode->i_size, m->info.size);

		err = pohmelfs_setattr_raw(inode, &iattr);
		if (!err) {
			struct dentry *dentry = d_find_alias(inode);

			if (dentry) {
				fsnotify_change(dentry, iattr.ia_valid);
				dput(dentry);
			}
		}
	}

	pi->lock_type = type;
	set_bit(NETFS_INODE_OWNED, &pi->state);

	pohmelfs_mcache_put(psb, m);

	return 0;

err_out_put:
	pohmelfs_mcache_put(psb, m);
	return err;
}
static int cifs_read_super(struct super_block *sb, void *data,
			   const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

#ifdef CONFIG_CIFS_DFS_UPCALL
	/* copy mount params to sb for use in submounts */
	/* BB: should we move this after the mount so we
	 * do not have to do the copy on failed mounts?
	 * BB: May be it is better to do simple copy before
	 * complex operation (mount), and in case of fail
	 * just exit instead of doing mount and attempting
	 * undo it if this copy fails?*/
	if (data) {
		int len = strlen(data);
		cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
		if (cifs_sb->mountdata == NULL) {
			kfree(sb->s_fs_info);
			sb->s_fs_info = NULL;
			return -ENOMEM;
		}
		strncpy(cifs_sb->mountdata, data, len + 1);
		cifs_sb->mountdata[len] = '\0';
	}
#endif

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1, ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize =
		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb, ROOT_I);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		inode = NULL;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

	cifs_umount(sb, cifs_sb);

out_mount_failed:
	if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (cifs_sb->mountdata) {
			kfree(cifs_sb->mountdata);
			cifs_sb->mountdata = NULL;
		}
#endif
		unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
int msm_camio_clk_enable(enum msm_camio_clk_type clktype)
{
	int rc = 0;
	struct clk *clk = NULL;

	switch (clktype) {
	case CAMIO_VFE_MDC_CLK:
		camio_vfe_mdc_clk = clk = clk_get(NULL, "vfe_mdc_clk");
		break;
	case CAMIO_MDC_CLK:
		camio_mdc_clk = clk = clk_get(NULL, "mdc_clk");
		break;
	case CAMIO_VFE_CLK:
		camio_vfe_clk = clk = clk_get(NULL, "vfe_clk");
		msm_camio_clk_rate_set_2(clk, camio_clk.vfe_clk_rate);
		break;
	case CAMIO_VFE_CAMIF_CLK:
		camio_vfe_camif_clk = clk = clk_get(NULL, "vfe_camif_clk");
		break;
	case CAMIO_VFE_PBDG_CLK:
		camio_vfe_pbdg_clk = clk = clk_get(NULL, "vfe_pclk");
		break;
	case CAMIO_CAM_MCLK_CLK:
		camio_cam_m_clk = clk = clk_get(NULL, "cam_m_clk");
		msm_camio_clk_rate_set_2(clk, camio_clk.mclk_clk_rate);
		break;
	case CAMIO_CAMIF_PAD_PBDG_CLK:
		camio_camif_pad_pbdg_clk = clk = clk_get(NULL, "camif_pad_pclk");
		break;
	case CAMIO_CSI0_CLK:
		camio_csi_clk = clk = clk_get(NULL, "csi_clk");
		msm_camio_clk_rate_set_2(clk, 153600000);
		break;
	case CAMIO_CSI0_VFE_CLK:
		camio_csi_vfe_clk = clk = clk_get(NULL, "csi_vfe_clk");
		break;
	case CAMIO_CSI0_PCLK:
		camio_csi_pclk = clk = clk_get(NULL, "csi_pclk");
		break;
	case CAMIO_JPEG_CLK:
		camio_jpeg_clk = clk = clk_get(NULL, "jpeg_clk");
		jpeg_clk_rate = clk_round_rate(clk, 144000000);
		clk_set_rate(clk, jpeg_clk_rate);
		break;
	case CAMIO_JPEG_PCLK:
		camio_jpeg_pclk = clk = clk_get(NULL, "jpeg_pclk");
		break;
	case CAMIO_VPE_CLK:
		camio_vpe_clk = clk = clk_get(NULL, "vpe_clk");
		vpe_clk_rate = clk_round_rate(clk, vpe_clk_rate);
		clk_set_rate(clk, vpe_clk_rate);
		break;
	default:
		break;
	}

	/* clk stays NULL for an unknown clktype and IS_ERR(NULL) is false,
	 * so check for NULL explicitly before enabling */
	if (clk && !IS_ERR(clk))
		clk_enable(clk);
	else
		rc = -1;
	return rc;
}
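/*
 * A minimal sketch (not from the driver above) of a table-driven alternative
 * to the switch in msm_camio_clk_enable(): one array maps each
 * msm_camio_clk_type to a clock name and an optional target rate, so adding
 * a clock is a one-line change. The "camio_clk_map" table, its contents, and
 * camio_clk_enable_by_type() are illustrative assumptions; a real version
 * would also stash the struct clk pointer for the matching disable path.
 */
struct camio_clk_desc {
	const char *name;
	unsigned long rate;	/* 0 = leave the rate alone */
};

static const struct camio_clk_desc camio_clk_map[] = {
	[CAMIO_VFE_MDC_CLK]	= { "vfe_mdc_clk", 0 },
	[CAMIO_CSI0_CLK]	= { "csi_clk", 153600000 },
	/* ... one entry per msm_camio_clk_type ... */
};

static int camio_clk_enable_by_type(enum msm_camio_clk_type clktype)
{
	const struct camio_clk_desc *desc;
	struct clk *clk;
	int rc;

	if (clktype >= ARRAY_SIZE(camio_clk_map) ||
	    !camio_clk_map[clktype].name)
		return -EINVAL;
	desc = &camio_clk_map[clktype];

	clk = clk_get(NULL, desc->name);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (desc->rate)
		clk_set_rate(clk, clk_round_rate(clk, desc->rate));

	rc = clk_enable(clk);
	if (rc)
		clk_put(clk);	/* drop the reference on failure */
	return rc;
}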
static int __init gpio_vbus_probe(struct platform_device *pdev)
{
	struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
	struct gpio_vbus_data *gpio_vbus;
	struct resource *res;
	int err, gpio, irq;

	if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
		return -EINVAL;
	gpio = pdata->gpio_vbus;

	gpio_vbus = kzalloc(sizeof(struct gpio_vbus_data), GFP_KERNEL);
	if (!gpio_vbus)
		return -ENOMEM;

	gpio_vbus->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
	if (!gpio_vbus->phy.otg) {
		kfree(gpio_vbus);
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gpio_vbus);
	gpio_vbus->dev = &pdev->dev;
	gpio_vbus->phy.label = "gpio-vbus";
	gpio_vbus->phy.set_power = gpio_vbus_set_power;
	gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
	gpio_vbus->phy.state = OTG_STATE_UNDEFINED;

	gpio_vbus->phy.otg->phy = &gpio_vbus->phy;
	gpio_vbus->phy.otg->set_peripheral = gpio_vbus_set_peripheral;

	err = gpio_request(gpio, "vbus_detect");
	if (err) {
		dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
			gpio, err);
		goto err_gpio;
	}
	gpio_direction_input(gpio);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res) {
		irq = res->start;
		res->flags &= IRQF_TRIGGER_MASK;
		res->flags |= IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	} else
		irq = gpio_to_irq(gpio);

	/* if data line pullup is in use, initialize it to "not pulling up" */
	gpio = pdata->gpio_pullup;
	if (gpio_is_valid(gpio)) {
		err = gpio_request(gpio, "udc_pullup");
		if (err) {
			dev_err(&pdev->dev,
				"can't request pullup gpio %d, err: %d\n",
				gpio, err);
			gpio_free(pdata->gpio_vbus);
			goto err_gpio;
		}
		gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
	}

	err = request_irq(irq, gpio_vbus_irq, VBUS_IRQ_FLAGS,
			  "vbus_detect", pdev);
	if (err) {
		dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
			irq, err);
		goto err_irq;
	}

	ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);

	INIT_DELAYED_WORK(&gpio_vbus->work, gpio_vbus_work);

	gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
	if (IS_ERR(gpio_vbus->vbus_draw)) {
		dev_dbg(&pdev->dev, "can't get vbus_draw regulator, err: %ld\n",
			PTR_ERR(gpio_vbus->vbus_draw));
		gpio_vbus->vbus_draw = NULL;
	}

	/* only active when a gadget is registered */
	err = usb_set_transceiver(&gpio_vbus->phy);
	if (err) {
		dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
			err);
		goto err_otg;
	}

	return 0;
err_otg:
	free_irq(irq, pdev);	/* dev_id must match the one passed to request_irq() */
err_irq:
	if (gpio_is_valid(pdata->gpio_pullup))
		gpio_free(pdata->gpio_pullup);
	gpio_free(pdata->gpio_vbus);
err_gpio:
	platform_set_drvdata(pdev, NULL);
	kfree(gpio_vbus->phy.otg);
	kfree(gpio_vbus);
	return err;
}
int msm_camio_clk_disable(enum msm_camio_clk_type clktype)
{
	int rc = 0;
	struct clk *clk = NULL;

	switch (clktype) {
	case CAMIO_VFE_MDC_CLK:
		clk = camio_vfe_mdc_clk;
		break;
	case CAMIO_MDC_CLK:
		clk = camio_mdc_clk;
		break;
	case CAMIO_VFE_CLK:
		clk = camio_vfe_clk;
		break;
	case CAMIO_VFE_CAMIF_CLK:
		clk = camio_vfe_camif_clk;
		break;
	case CAMIO_VFE_PBDG_CLK:
		clk = camio_vfe_pbdg_clk;
		break;
	case CAMIO_CAM_MCLK_CLK:
		clk = camio_cam_m_clk;
		break;
	case CAMIO_CAMIF_PAD_PBDG_CLK:
		clk = camio_camif_pad_pbdg_clk;
		break;
	case CAMIO_CSI0_CLK:
		clk = camio_csi_clk;
		break;
	case CAMIO_CSI0_VFE_CLK:
		clk = camio_csi_vfe_clk;
		break;
	case CAMIO_CSI0_PCLK:
		clk = camio_csi_pclk;
		break;
	case CAMIO_JPEG_CLK:
		clk = camio_jpeg_clk;
		break;
	case CAMIO_JPEG_PCLK:
		clk = camio_jpeg_pclk;
		break;
	case CAMIO_VPE_CLK:
		clk = camio_vpe_clk;
		break;
	default:
		break;
	}

	/* as in the enable path, clk is NULL for an unknown clktype,
	 * which IS_ERR() alone would not catch */
	if (clk && !IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	} else
		rc = -1;
	return rc;
}
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
					      struct inode *inode,
					      unsigned flags)
{
	int ret, needed_blocks;
	handle_t *handle = NULL;
	int retries = 0, sem_held = 0;
	struct page *page = NULL;
	unsigned from, to;
	struct ext4_iloc iloc;

	if (!ext4_has_inline_data(inode)) {
		/*
		 * clear the flag so that no new write
		 * will trap here again.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
		return 0;
	}

	needed_blocks = ext4_writepage_trans_blocks(inode);

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		return ret;

retry:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		handle = NULL;
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	down_write(&EXT4_I(inode)->xattr_sem);
	sem_held = 1;
	/* If some one has already done this for us, just exit. */
	if (!ext4_has_inline_data(inode)) {
		ret = 0;
		goto out;
	}

	from = 0;
	to = ext4_get_inline_size(inode);
	if (!PageUptodate(page)) {
		ret = ext4_read_inline_page(inode, page);
		if (ret < 0)
			goto out;
	}

	ret = ext4_destroy_inline_data_nolock(handle, inode);
	if (ret)
		goto out;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, from, to,
					  ext4_get_block_write);
	else
		ret = __block_write_begin(page, from, to,
					  ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		page = NULL;	/* don't touch the released page below */
		ext4_orphan_add(handle, inode);
		up_write(&EXT4_I(inode)->xattr_sem);
		sem_held = 0;
		ext4_journal_stop(handle);
		handle = NULL;
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might
		 * still be on the orphan list; we need to
		 * make sure the inode is removed from the
		 * orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	if (page)
		block_commit_write(page, from, to);
out:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (sem_held)
		up_write(&EXT4_I(inode)->xattr_sem);
	if (handle)
		ext4_journal_stop(handle);
	brelse(iloc.bh);
	return ret;
}
static int xhci_histb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct xhci_hcd_histb *histb;
	const struct hc_driver *driver;
	struct usb_hcd *hcd;
	struct xhci_hcd *xhci;
	struct resource *res;
	int irq;
	int ret = -ENODEV;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_histb_hc_driver;
	histb = devm_kzalloc(dev, sizeof(*histb), GFP_KERNEL);
	if (!histb)
		return -ENOMEM;

	histb->dev = dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	histb->ctrl = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(histb->ctrl))
		return PTR_ERR(histb->ctrl);

	ret = xhci_histb_clks_get(histb);
	if (ret)
		return ret;

	histb->soft_reset = devm_reset_control_get(dev, "soft");
	if (IS_ERR(histb->soft_reset)) {
		dev_err(dev, "failed to get soft reset\n");
		return PTR_ERR(histb->soft_reset);
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	device_enable_async_suspend(dev);

	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		goto disable_pm;	/* don't leak the runtime PM reference */

	hcd = usb_create_hcd(driver, dev, dev_name(dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto disable_pm;
	}

	hcd->regs = histb->ctrl;
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	histb->hcd = hcd;
	dev_set_drvdata(hcd->self.controller, histb);

	ret = xhci_histb_host_enable(histb);
	if (ret)
		goto put_hcd;

	xhci = hcd_to_xhci(hcd);

	device_wakeup_enable(hcd->self.controller);

	xhci->main_hcd = hcd;
	xhci->shared_hcd = usb_create_shared_hcd(driver, dev, dev_name(dev),
						 hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto disable_host;
	}

	if (device_property_read_bool(dev, "usb2-lpm-disable"))
		xhci->quirks |= XHCI_HW_LPM_DISABLE;

	if (device_property_read_bool(dev, "usb3-lpm-capable"))
		xhci->quirks |= XHCI_LPM_SUPPORT;

	/* imod_interval is the interrupt moderation value in nanoseconds. */
	xhci->imod_interval = 40000;
	device_property_read_u32(dev, "imod-interval-ns",
				 &xhci->imod_interval);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
		xhci->shared_hcd->can_do_streams = 1;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto dealloc_usb2_hcd;

	device_enable_async_suspend(dev);
	pm_runtime_put_noidle(dev);

	/*
	 * Prevent runtime pm from being on as default, users should enable
	 * runtime pm using power/control in sysfs.
	 */
	pm_runtime_forbid(dev);

	return 0;

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);
put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);
disable_host:
	xhci_histb_host_disable(histb);
put_hcd:
	usb_put_hcd(hcd);
disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}
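/*
 * xhci_histb_clks_get() is called above but not included in this excerpt.
 * Below is a minimal sketch of such a helper, assuming devm-managed clocks,
 * illustrative clock names ("bus", "utmi", "pipe", "suspend"), and matching
 * bus_clk/utmi_clk/pipe_clk/suspend_clk fields in struct xhci_hcd_histb;
 * the real helper may differ.
 */
static int xhci_histb_clks_get(struct xhci_hcd_histb *histb)
{
	struct device *dev = histb->dev;

	histb->bus_clk = devm_clk_get(dev, "bus");		/* assumed name */
	if (IS_ERR(histb->bus_clk))
		return PTR_ERR(histb->bus_clk);

	histb->utmi_clk = devm_clk_get(dev, "utmi");		/* assumed name */
	if (IS_ERR(histb->utmi_clk))
		return PTR_ERR(histb->utmi_clk);

	histb->pipe_clk = devm_clk_get(dev, "pipe");		/* assumed name */
	if (IS_ERR(histb->pipe_clk))
		return PTR_ERR(histb->pipe_clk);

	histb->suspend_clk = devm_clk_get(dev, "suspend");	/* assumed name */
	return PTR_ERR_OR_ZERO(histb->suspend_clk);
}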