static int dwc3_core_get_phy(struct dwc3 *dwc)
{
	struct device *dev = dwc->dev;
	struct device_node *node = dev->of_node;
	int ret;

	if (node) {
		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
	} else {
		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
	}

	if (IS_ERR(dwc->usb2_phy)) {
		ret = PTR_ERR(dwc->usb2_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb2_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	if (IS_ERR(dwc->usb3_phy)) {
		ret = PTR_ERR(dwc->usb3_phy);
		if (ret == -ENXIO || ret == -ENODEV) {
			dwc->usb3_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	dwc->usb2_generic_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(dwc->usb2_generic_phy)) {
		ret = PTR_ERR(dwc->usb2_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb2_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb2 phy configured\n");
			return ret;
		}
	}

	dwc->usb3_generic_phy = devm_phy_get(dev, "usb3-phy");
	if (IS_ERR(dwc->usb3_generic_phy)) {
		ret = PTR_ERR(dwc->usb3_generic_phy);
		if (ret == -ENOSYS || ret == -ENODEV) {
			dwc->usb3_generic_phy = NULL;
		} else if (ret == -EPROBE_DEFER) {
			return ret;
		} else {
			dev_err(dev, "no usb3 phy configured\n");
			return ret;
		}
	}

	return 0;
}
/* HW initialization; returns 0 on success. */
static int mx6q_sabreauto_sata_init(struct device *dev, void __iomem *addr)
{
	u32 tmpdata;
	int ret = 0;
	struct clk *clk;

	sata_clk = clk_get(dev, "imx_sata_clk");
	if (IS_ERR(sata_clk)) {
		dev_err(dev, "no sata clock.\n");
		return PTR_ERR(sata_clk);
	}
	ret = clk_enable(sata_clk);
	if (ret) {
		dev_err(dev, "can't enable sata clock.\n");
		goto put_sata_clk;
	}

	/* Set PHY parameters, two steps to configure the GPR13,
	 * one write for the rest of the parameters, mask of the first
	 * write is 0x07FFFFFD, and the other write for setting
	 * the mpll_clk_off_b
	 * .rx_eq_val_0(iomuxc_gpr13[26:24]),
	 * .los_lvl(iomuxc_gpr13[23:19]),
	 * .rx_dpll_mode_0(iomuxc_gpr13[18:16]),
	 * .sata_speed(iomuxc_gpr13[15]),
	 * .mpll_ss_en(iomuxc_gpr13[14]),
	 * .tx_atten_0(iomuxc_gpr13[13:11]),
	 * .tx_boost_0(iomuxc_gpr13[10:7]),
	 * .tx_lvl(iomuxc_gpr13[6:2]),
	 * .mpll_ck_off(iomuxc_gpr13[1]),
	 * .tx_edgerate_0(iomuxc_gpr13[0]),
	 */
	tmpdata = readl(IOMUXC_GPR13);
	writel(((tmpdata & ~0x07FFFFFD) | 0x0593A044), IOMUXC_GPR13);

	/* enable SATA_PHY PLL */
	tmpdata = readl(IOMUXC_GPR13);
	writel(((tmpdata & ~0x2) | 0x2), IOMUXC_GPR13);

	/* Get the AHB clock rate, and configure the TIMER1MS reg later */
	clk = clk_get(NULL, "ahb");
	if (IS_ERR(clk)) {
		dev_err(dev, "no ahb clock.\n");
		ret = PTR_ERR(clk);
		goto release_sata_clk;
	}
	tmpdata = clk_get_rate(clk) / 1000;
	clk_put(clk);

#ifdef CONFIG_SATA_AHCI_PLATFORM
	ret = sata_init(addr, tmpdata);
	if (ret == 0)
		return ret;
#else
	usleep_range(1000, 2000);
	/* AHCI PHY enter into PDDQ mode if the AHCI module is not enabled */
	tmpdata = readl(addr + PORT_PHY_CTL);
	writel(tmpdata | PORT_PHY_CTL_PDDQ_LOC, addr + PORT_PHY_CTL);
	pr_info("No AHCI save PWR: PDDQ %s\n",
		((readl(addr + PORT_PHY_CTL) >> 20) & 1) ?
		"enabled" : "disabled");
#endif

release_sata_clk:
	/* disable SATA_PHY PLL */
	writel((readl(IOMUXC_GPR13) & ~0x2), IOMUXC_GPR13);
	clk_disable(sata_clk);
put_sata_clk:
	clk_put(sata_clk);

	return ret;
}
/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage.  Use this call to set up the ports you will be
 * exporting through USB.  Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned i;
	struct usb_cdc_line_coding coding;
	int status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->owner = THIS_MODULE;
	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
				| TTY_DRIVER_RESET_TERMIOS;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	/* 9600 baud, 1 stop bit, no parity, 8 data bits */
	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = USB_CDC_1_STOP_BITS;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = 8;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	gserial_wq = create_singlethread_workqueue("k_gserial");
	if (!gserial_wq) {
		status = -ENOMEM;
		goto fail;
	}

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n", __func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device *tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	for (i = 0; i < count; i++)
		usb_debugfs_init(ports[i].port, i);

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

#ifdef CONFIG_USB_G_SERIAL_CONSOLE
	for (i = 0; i < count; i++) {
		struct gs_port *p;

		p = ports[i].port;
		strcpy(p->port_console.name, PREFIX);
		p->port_console.write = gs_console_write;
		p->port_console.device = gs_console_device;
		p->port_console.setup = gs_console_setup;
		p->port_console.flags = CON_PRINTBUFFER;
		p->port_console.index = p->port_num;
		p->port_console.data = p;
		//register_console(&p->port_console);
		INIT_WORK(&p->work_register, gs_register_console_work_handler);
		INIT_WORK(&p->work_unregister,
				gs_unregister_console_work_handler);
	}
#endif
	return status;

fail:
	while (count--)
		kfree(ports[count].port);
	if (gserial_wq)
		destroy_workqueue(gserial_wq);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
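/*
 * Illustrative sketch (not from the original source): the kernel-doc
 * above says gserial_setup() must run before the ports are wired into
 * configurations, so a gadget driver's bind() callback might reserve
 * its ports up front, roughly as below.  my_gadget_bind() and the
 * two-port count are hypothetical; gserial_cleanup() is assumed to be
 * the matching teardown helper from the same file.
 */
static int my_gadget_bind(struct usb_gadget *gadget)
{
	int status;

	/* tell the TTY layer how many /dev/ttyGS* nodes to plan for */
	status = gserial_setup(gadget, 2);
	if (status < 0)
		return status;

	/*
	 * ... add configurations that connect ports 0 and 1 to
	 * functions; call gserial_cleanup() on any failure path ...
	 */

	return 0;
}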
static int proximity_do_calibrate(struct gp2a_data *data,
			bool do_calib, bool thresh_set)
{
	struct file *cal_filp;
	int err;
	int xtalk_avg = 0;
	int offset_change = 0;
	uint16_t thrd = 0;
	u8 reg;
	mm_segment_t old_fs;

	if (do_calib) {
		if (thresh_set) {
			/* for proximity_thresh_store */
			data->offset_value =
				data->threshold_high -
				(gp2a_reg[6][1] << 8 | gp2a_reg[5][1]);
		} else {
			/* tap offset button */
			/* get offset value */
			xtalk_avg = proximity_adc_read(data);
			offset_change =
				(gp2a_reg[6][1] << 8 | gp2a_reg[5][1])
					- DEFAULT_HI_THR;
			if (xtalk_avg < offset_change) {
				/* do not need calibration */
				data->cal_result = 0;
				err = 0;
				goto no_cal;
			}
			data->offset_value = xtalk_avg - offset_change;
		}
		/* update low threshold (LSB in reg[3], MSB in reg[4]) */
		thrd = (gp2a_reg[4][1] << 8 | gp2a_reg[3][1])
			+ (data->offset_value);
		THR_REG_LSB(thrd, reg);
		gp2a_i2c_write(data, gp2a_reg[3][0], &reg);
		THR_REG_MSB(thrd, reg);
		gp2a_i2c_write(data, gp2a_reg[4][0], &reg);

		/* update high threshold (LSB in reg[5], MSB in reg[6]) */
		thrd = (gp2a_reg[6][1] << 8 | gp2a_reg[5][1])
			+ (data->offset_value);
		THR_REG_LSB(thrd, reg);
		gp2a_i2c_write(data, gp2a_reg[5][0], &reg);
		THR_REG_MSB(thrd, reg);
		gp2a_i2c_write(data, gp2a_reg[6][0], &reg);

		/* calibration result */
		if (!thresh_set)
			data->cal_result = 1;
	} else {
		/* tap reset button */
		data->offset_value = 0;
		/* update threshold */
		gp2a_i2c_write(data, gp2a_reg[3][0], &gp2a_reg[3][1]);
		gp2a_i2c_write(data, gp2a_reg[4][0], &gp2a_reg[4][1]);
		gp2a_i2c_write(data, gp2a_reg[5][0], &gp2a_reg[5][1]);
		gp2a_i2c_write(data, gp2a_reg[6][0], &gp2a_reg[6][1]);
		/* calibration result */
		data->cal_result = 2;
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	cal_filp = filp_open(data->pdata->prox_cal_path,
			O_CREAT | O_TRUNC | O_WRONLY | O_SYNC,
			S_IRUGO | S_IWUSR | S_IWGRP);
	if (IS_ERR(cal_filp)) {
		pr_err("%s: Can't open calibration file\n", __func__);
		set_fs(old_fs);
		err = PTR_ERR(cal_filp);
		goto done;
	}

	err = cal_filp->f_op->write(cal_filp,
		(char *)&data->offset_value, sizeof(int),
		&cal_filp->f_pos);
	if (err != sizeof(int)) {
		pr_err("%s: Can't write the cal data to file\n", __func__);
		err = -EIO;
	}

	filp_close(cal_filp, current->files);
done:
	set_fs(old_fs);
no_cal:
	return err;
}
static s32 nps_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct nps_enet_priv *priv;
	s32 err = 0;
	const char *mac_addr;
	struct resource *res_regs;

	if (!dev->of_node)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	priv = netdev_priv(ndev);

	/* The EZ NET specific entries in the device structure. */
	ndev->netdev_ops = &nps_netdev_ops;
	ndev->watchdog_timeo = (400 * HZ / 1000);
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs_base = devm_ioremap_resource(dev, res_regs);
	if (IS_ERR(priv->regs_base)) {
		err = PTR_ERR(priv->regs_base);
		goto out_netdev;
	}
	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);

	/* set kernel MAC address to dev */
	mac_addr = of_get_mac_address(dev->of_node);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);

	/* Get IRQ number; platform_get_irq() returns a negative errno
	 * on failure.
	 */
	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
		err = -ENODEV;
		goto out_netdev;
	}

	netif_napi_add(ndev, &priv->napi, nps_enet_poll,
		       NPS_ENET_NAPI_POLL_WEIGHT);

	/* Register the driver. Should be the last thing in probe */
	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
			ndev->name, (s32)err);
		goto out_netif_api;
	}

	dev_info(dev, "(rx/tx=%d)\n", priv->irq);
	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
out_netdev:
	if (err)
		free_netdev(ndev);

	return err;
}
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
			  struct device *dev, struct snd_soc_card *card)
{
	int ret;

	data->dev = dev;
	data->card = card;

	data->clk_pll_p_out1 = clk_get_sys(NULL, "pll_p_out1");
	if (IS_ERR(data->clk_pll_p_out1)) {
		dev_err(data->dev, "Can't retrieve clk pll_p_out1\n");
		ret = PTR_ERR(data->clk_pll_p_out1);
		goto err;
	}

	data->clk_pll_a = clk_get_sys(NULL, "pll_a");
	if (IS_ERR(data->clk_pll_a)) {
		dev_err(data->dev, "Can't retrieve clk pll_a\n");
		ret = PTR_ERR(data->clk_pll_a);
		goto err_put_pll_p_out1;
	}

	data->clk_pll_a_out0 = clk_get_sys(NULL, "pll_a_out0");
	if (IS_ERR(data->clk_pll_a_out0)) {
		dev_err(data->dev, "Can't retrieve clk pll_a_out0\n");
		ret = PTR_ERR(data->clk_pll_a_out0);
		goto err_put_pll_a;
	}

	data->clk_m = clk_get_sys(NULL, "clk_m");
	if (IS_ERR(data->clk_m)) {
		dev_err(data->dev, "Can't retrieve clk clk_m\n");
		ret = PTR_ERR(data->clk_m);
		goto err_put_pll_a_out0;
	}

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	data->clk_cdev1 = clk_get_sys(NULL, "cdev1");
#else
	data->clk_cdev1 = clk_get_sys("extern1", NULL);
#endif
	if (IS_ERR(data->clk_cdev1)) {
		dev_err(data->dev, "Can't retrieve clk cdev1\n");
		ret = PTR_ERR(data->clk_cdev1);
		goto err_put_pll_a_out0;
	}

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	data->clk_out1 = ERR_PTR(-ENOENT);
#else
	data->clk_out1 = clk_get_sys("clk_out_1", "extern1");
	if (IS_ERR(data->clk_out1)) {
		dev_err(data->dev, "Can't retrieve clk out1\n");
		ret = PTR_ERR(data->clk_out1);
		goto err_put_cdev1;
	}
#endif

	ret = clk_enable(data->clk_cdev1);
	if (ret) {
		dev_err(data->dev, "Can't enable clk cdev1/extern1\n");
		goto err_put_out1;
	}

	if (!IS_ERR(data->clk_out1)) {
		ret = clk_enable(data->clk_out1);
		if (ret) {
			dev_err(data->dev, "Can't enable clk out1\n");
			goto err_put_out1;
		}
	}

	ret = tegra_asoc_utils_set_rate(data, 48000, 256 * 48000);
	if (ret)
		goto err_put_out1;

	return 0;

err_put_out1:
	if (!IS_ERR(data->clk_out1))
		clk_put(data->clk_out1);
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
err_put_cdev1:
#endif
	clk_put(data->clk_cdev1);
err_put_pll_a_out0:
	clk_put(data->clk_pll_a_out0);
err_put_pll_a:
	clk_put(data->clk_pll_a);
err_put_pll_p_out1:
	clk_put(data->clk_pll_p_out1);
err:
	return ret;
}
EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	u32 bitmap_size, flags, expected_extent_count;
	unsigned long nrbits, start_bit, end_bit;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     fs_info->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     fs_info->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   fs_info->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf,
							    path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	nrbits = div_u64(block_group->key.offset,
			 block_group->fs_info->sectorsize);
	start_bit = find_next_bit_le(bitmap, nrbits, 0);

	while (start_bit < nrbits) {
		end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit);
		ASSERT(start_bit < end_bit);

		key.objectid = start + start_bit * block_group->fs_info->sectorsize;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = (end_bit - start_bit) *
			     block_group->fs_info->sectorsize;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;

		start_bit = find_next_bit_le(bitmap, nrbits, end_bit);
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	unsigned int max_dtr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
	    (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
			   MMC_CAP_DDR))) {
		unsigned ext_csd_bit, bus_width;

		if ((host->caps & MMC_CAP_DDR) &&
		    (card->ext_csd.rev >= EXT_CSD_REV_1_5)) {
			if (host->caps & MMC_CAP_8_BIT_DATA) {
				ext_csd_bit = EXT_CSD_BUS_WIDTH_8_DDR;
				bus_width = MMC_BUS_WIDTH_8_DDR;
			} else {
				ext_csd_bit = EXT_CSD_BUS_WIDTH_4_DDR;
				bus_width = MMC_BUS_WIDTH_4_DDR;
			}
		} else {
			if (host->caps & MMC_CAP_8_BIT_DATA) {
				ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
				bus_width = MMC_BUS_WIDTH_8;
			} else {
				ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
				bus_width = MMC_BUS_WIDTH_4;
			}
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d "
			       "failed\n", mmc_hostname(card->host),
			       1 << bus_width);
			err = 0;
		} else {
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard) {
		host->card = card;
#ifdef CONFIG_MMC_DISCARD
		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
#endif /* CONFIG_MMC_DISCARD */
	}

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
static int arizona_ldo1_probe(struct platform_device *pdev)
{
	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
	const struct regulator_desc *desc;
	struct regulator_config config = { };
	struct arizona_ldo1 *ldo1;
	int ret;

	arizona->external_dcvdd = false;

	ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
	if (ldo1 == NULL) {
		dev_err(&pdev->dev, "Unable to allocate private data\n");
		return -ENOMEM;
	}

	ldo1->arizona = arizona;

	/*
	 * Since the chip usually supplies itself we provide some
	 * default init_data for it.  This will be overridden with
	 * platform data if provided.
	 */
	switch (arizona->type) {
	case WM5102:
	case WM8997:
		desc = &arizona_ldo1_hc;
		ldo1->init_data = arizona_ldo1_dvfs;
		break;
	default:
		desc = &arizona_ldo1;
		ldo1->init_data = arizona_ldo1_default;
		break;
	}

	ldo1->init_data.consumer_supplies = &ldo1->supply;
	ldo1->supply.supply = "DCVDD";
	ldo1->supply.dev_name = dev_name(arizona->dev);

	config.dev = arizona->dev;
	config.driver_data = ldo1;
	config.regmap = arizona->regmap;

	if (!dev_get_platdata(arizona->dev)) {
		ret = arizona_ldo1_of_get_pdata(arizona, &config);
		if (ret < 0)
			return ret;
	}

	config.ena_gpio = arizona->pdata.ldoena;

	if (arizona->pdata.ldo1)
		config.init_data = arizona->pdata.ldo1;
	else
		config.init_data = &ldo1->init_data;

	/*
	 * LDO1 can only be used to supply DCVDD so if it has no
	 * consumers then DCVDD is supplied externally.
	 */
	if (config.init_data->num_consumer_supplies == 0)
		arizona->external_dcvdd = true;

	ldo1->regulator = regulator_register(desc, &config);
	if (IS_ERR(ldo1->regulator)) {
		ret = PTR_ERR(ldo1->regulator);
		dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
			ret);
		return ret;
	}

	if (!dev_get_platdata(arizona->dev))
		arizona_ldo1_of_put_pdata(&config);

	platform_set_drvdata(pdev, ldo1);

	return 0;
}
static struct super_block *v9fs_get_sb(struct file_system_type *fs_type,
				       int flags, const char *dev_name,
				       void *data)
{
	struct super_block *sb = NULL;
	struct v9fs_fcall *fcall = NULL;
	struct inode *inode = NULL;
	struct dentry *root = NULL;
	struct v9fs_session_info *v9ses = NULL;
	struct v9fs_fid *root_fid = NULL;
	int mode = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int stat_result = 0;
	int newfid = 0;
	int retval = 0;

	dprintk(DEBUG_VFS, " \n");

	v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
	if (!v9ses)
		return ERR_PTR(-ENOMEM);

	if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
		dprintk(DEBUG_ERROR, "problem initiating session\n");
		sb = ERR_PTR(newfid);
		goto out_free_session;
	}

	sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
	if (IS_ERR(sb))
		goto out_close_session;
	v9fs_fill_super(sb, v9ses, flags);

	inode = v9fs_get_inode(sb, S_IFDIR | mode);
	if (IS_ERR(inode)) {
		retval = PTR_ERR(inode);
		goto put_back_sb;
	}

	inode->i_uid = uid;
	inode->i_gid = gid;

	root = d_alloc_root(inode);
	if (!root) {
		retval = -ENOMEM;
		goto put_back_sb;
	}

	sb->s_root = root;

	stat_result = v9fs_t_stat(v9ses, newfid, &fcall);
	if (stat_result < 0) {
		dprintk(DEBUG_ERROR, "stat error\n");
		v9fs_t_clunk(v9ses, newfid);
	} else {
		/* Setup the Root Inode */
		root_fid = v9fs_fid_create(v9ses, newfid);
		if (root_fid == NULL) {
			retval = -ENOMEM;
			goto put_back_sb;
		}

		retval = v9fs_fid_insert(root_fid, root);
		if (retval < 0) {
			kfree(fcall);
			goto put_back_sb;
		}

		root_fid->qid = fcall->params.rstat.stat.qid;
		root->d_inode->i_ino =
			v9fs_qid2ino(&fcall->params.rstat.stat.qid);
		v9fs_stat2inode(&fcall->params.rstat.stat, root->d_inode, sb);
	}

	kfree(fcall);

	if (stat_result < 0) {
		retval = stat_result;
		goto put_back_sb;
	}

	return sb;

out_close_session:
	v9fs_session_close(v9ses);
out_free_session:
	kfree(v9ses);
	return sb;

put_back_sb:
	/* deactivate_super calls v9fs_kill_super, which frees the rest */
	up_write(&sb->s_umount);
	deactivate_super(sb);
	return ERR_PTR(retval);
}
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	u32 rsvd_lkey;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
	if (ret) {
		pr_err("Failed to query special context %d\n", ret);
		return ret;
	}
	dev->ib_dev.local_dma_lkey = rsvd_lkey;

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq_attr.cqe = 128;
	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
			  &cq_attr);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dwc3_platform_data *pdata = dev_get_platdata(dev);
	struct resource *res;
	struct dwc3 *dwc;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u32 fladj = 0;
	int ret;
	void __iomem *regs;
	void *mem;

	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
	dwc->mem = mem;
	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "missing IRQ\n");
		return -ENODEV;
	}
	dwc->xhci_resources[1].start = res->start;
	dwc->xhci_resources[1].end = res->end;
	dwc->xhci_resources[1].flags = res->flags;
	dwc->xhci_resources[1].name = res->name;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	res->start += DWC3_GLOBALS_REGS_START;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		goto err0;
	}

	dwc->regs = regs;
	dwc->regs_size = resource_size(res);

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xff;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");

	dwc->needs_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");

	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &fladj);

	if (pdata) {
		dwc->maximum_speed = pdata->maximum_speed;
		dwc->has_lpm_erratum = pdata->has_lpm_erratum;
		if (pdata->lpm_nyet_threshold)
			lpm_nyet_threshold = pdata->lpm_nyet_threshold;
		dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
		if (pdata->hird_threshold)
			hird_threshold = pdata->hird_threshold;

		dwc->needs_fifo_resize = pdata->tx_fifo_resize;
		dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
		dwc->dr_mode = pdata->dr_mode;

		dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
		dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
		dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
		dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
		dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
		dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
		dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
		dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
		dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
		dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
		dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;

		dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
		if (pdata->tx_de_emphasis)
			tx_de_emphasis = pdata->tx_de_emphasis;

		dwc->hsphy_interface = pdata->hsphy_interface;
		fladj = pdata->fladj_value;
	}

	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;

	dwc->hird_threshold = hird_threshold
		| (dwc->is_utmi_l1_suspend << 4);

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		goto err0;

	ret = dwc3_core_get_phy(dwc);
	if (ret)
		goto err0;

	spin_lock_init(&dwc->lock);

	if (!dev->dma_mask) {
		dev->dma_mask = dev->parent->dma_mask;
		dev->dma_parms = dev->parent->dma_parms;
		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err1;
	}

	if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
		dwc->dr_mode = USB_DR_MODE_HOST;
	else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
		dwc->dr_mode = USB_DR_MODE_PERIPHERAL;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err1;
	}

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		/* fall through */
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		dwc->maximum_speed = USB_SPEED_SUPER;

		/*
		 * default to superspeed plus if we are capable.
		 */
		if (dwc3_is_usb31(dwc) &&
		    (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
		     DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;

		break;
	}

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc, fladj);

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);
	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err2;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err3;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err4;
	}

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	ret = dwc3_debugfs_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize debugfs\n");
		goto err6;
	}

	pm_runtime_allow(dev);

	return 0;

err6:
	dwc3_core_exit_mode(dwc);

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	phy_power_off(dwc->usb3_generic_phy);

err3:
	phy_power_off(dwc->usb2_generic_phy);

err2:
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	dwc3_core_exit(dwc);

err1:
	dwc3_free_event_buffers(dwc);
	dwc3_ulpi_exit(dwc);

err0:
	/*
	 * restore res->start back to its original value so that, in case
	 * the probe is deferred, we don't end up getting an error when
	 * requesting the memory region the next time probe is called.
	 */
	res->start -= DWC3_GLOBALS_REGS_START;

	return ret;
}
static int INITSECTION
cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
{
	struct cmos_rtc_board_info *info = dev->platform_data;
	int retval = 0;
	unsigned char rtc_control;
	unsigned address_space;

	/* there can be only one ... */
	if (cmos_rtc.dev)
		return -EBUSY;

	if (!ports)
		return -ENODEV;

	/* Claim I/O ports ASAP, minimizing conflict with legacy driver.
	 *
	 * REVISIT non-x86 systems may instead use memory space resources
	 * (needing ioremap etc), not i/o space resources like this ...
	 */
	ports = request_region(ports->start,
			ports->end + 1 - ports->start,
			driver_name);
	if (!ports) {
		dev_dbg(dev, "i/o registers already in use\n");
		return -EBUSY;
	}

	cmos_rtc.irq = rtc_irq;
	cmos_rtc.iomem = ports;

	/* Heuristic to deduce NVRAM size ... do what the legacy NVRAM
	 * driver did, but don't reject unknown configs.  Old hardware
	 * won't address 128 bytes.  Newer chips have multiple banks,
	 * though they may not be listed in one I/O resource.
	 */
#if defined(CONFIG_ATARI)
	address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
			|| defined(__sparc__)
	address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
	address_space = 128;
#endif
	if (can_bank2 && ports->end > (ports->start + 1))
		address_space = 256;

	/* For ACPI systems extension info comes from the FADT.  On others,
	 * board specific setup provides it as appropriate.  Systems where
	 * the alarm IRQ isn't automatically a wakeup IRQ (like ACPI, and
	 * some almost-clones) can provide hooks to make that behave.
	 *
	 * Note that ACPI doesn't preclude putting these registers into
	 * "extended" areas of the chip, including some that we won't yet
	 * expect CMOS_READ and friends to handle.
	 */
	if (info) {
		if (info->rtc_day_alarm && info->rtc_day_alarm < 128)
			cmos_rtc.day_alrm = info->rtc_day_alarm;
		if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128)
			cmos_rtc.mon_alrm = info->rtc_mon_alarm;
		if (info->rtc_century && info->rtc_century < 128)
			cmos_rtc.century = info->rtc_century;

		if (info->wake_on && info->wake_off) {
			cmos_rtc.wake_on = info->wake_on;
			cmos_rtc.wake_off = info->wake_off;
		}
	}

	cmos_rtc.rtc = rtc_device_register(driver_name, dev,
				&cmos_rtc_ops, THIS_MODULE);
	if (IS_ERR(cmos_rtc.rtc)) {
		retval = PTR_ERR(cmos_rtc.rtc);
		goto cleanup0;
	}

	cmos_rtc.dev = dev;
	dev_set_drvdata(dev, &cmos_rtc);
	rename_region(ports, dev_name(&cmos_rtc.rtc->dev));

	spin_lock_irq(&rtc_lock);

	/* force periodic irq to CMOS reset default of 1024Hz;
	 *
	 * REVISIT it's been reported that at least one x86_64 ALI mobo
	 * doesn't use 32KHz here ... for portability we might need to
	 * do something about other clock frequencies.
	 */
	cmos_rtc.rtc->irq_freq = 1024;
	hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
	CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);

	/* disable irqs */
	cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE);

	rtc_control = CMOS_READ(RTC_CONTROL);

	spin_unlock_irq(&rtc_lock);

	/* FIXME teach the alarm code how to handle binary mode;
	 * <asm-generic/rtc.h> doesn't know 12-hour mode either.
	 */
	if (is_valid_irq(rtc_irq) &&
	    (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))) {
		dev_dbg(dev, "only 24-hr BCD mode supported\n");
		retval = -ENXIO;
		goto cleanup1;
	}

	if (is_valid_irq(rtc_irq)) {
		irq_handler_t rtc_cmos_int_handler;

		if (is_hpet_enabled()) {
			int err;

			rtc_cmos_int_handler = hpet_rtc_interrupt;
			err = hpet_register_irq_handler(cmos_interrupt);
			if (err != 0) {
				printk(KERN_WARNING
					"hpet_register_irq_handler failed "
					"in rtc_init().");
				retval = err;
				goto cleanup1;
			}
		} else
			rtc_cmos_int_handler = cmos_interrupt;

		retval = request_irq(rtc_irq, rtc_cmos_int_handler,
				IRQF_DISABLED, dev_name(&cmos_rtc.rtc->dev),
				cmos_rtc.rtc);
		if (retval < 0) {
			dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
			goto cleanup1;
		}
	}
	hpet_rtc_timer_init();

	/* export at least the first block of NVRAM */
	nvram.size = address_space - NVRAM_OFFSET;
	retval = sysfs_create_bin_file(&dev->kobj, &nvram);
	if (retval < 0) {
		dev_dbg(dev, "can't create nvram file? %d\n", retval);
		goto cleanup2;
	}

	pr_info("%s: %s%s, %zd bytes nvram%s\n",
		dev_name(&cmos_rtc.rtc->dev),
		!is_valid_irq(rtc_irq) ? "no alarms" :
			cmos_rtc.mon_alrm ? "alarms up to one year" :
			cmos_rtc.day_alrm ? "alarms up to one month" :
			"alarms up to one day",
		cmos_rtc.century ? ", y3k" : "",
		nvram.size,
		is_hpet_enabled() ? ", hpet irqs" : "");

	return 0;

cleanup2:
	if (is_valid_irq(rtc_irq))
		free_irq(rtc_irq, cmos_rtc.rtc);
cleanup1:
	cmos_rtc.dev = NULL;
	rtc_device_unregister(cmos_rtc.rtc);
cleanup0:
	release_region(ports->start, ports->end + 1 - ports->start);
	return retval;
}
static int __devinit r_tpu_probe(struct platform_device *pdev)
{
	struct led_renesas_tpu_config *cfg = pdev->dev.platform_data;
	struct r_tpu_priv *p;
	struct resource *res;
	int ret;

	if (!cfg) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	/* get hold of clock */
	p->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err0;
	}

	p->pdev = pdev;
	p->pin_state = R_TPU_PIN_UNUSED;
	p->timer_state = R_TPU_TIMER_UNUSED;
	p->refresh_rate = cfg->refresh_rate ? cfg->refresh_rate : 100;
	r_tpu_set_pin(p, R_TPU_PIN_GPIO, LED_OFF);
	platform_set_drvdata(pdev, p);

	INIT_WORK(&p->work, r_tpu_work);

	p->ldev.name = cfg->name;
	p->ldev.brightness = LED_OFF;
	p->ldev.max_brightness = cfg->max_brightness;
	p->ldev.brightness_set = r_tpu_set_brightness;
	p->ldev.flags |= LED_CORE_SUSPENDRESUME;
	ret = led_classdev_register(&pdev->dev, &p->ldev);
	if (ret < 0)
		goto err1;

	/* max_brightness may be updated by the LED core code */
	p->min_rate = p->ldev.max_brightness * p->refresh_rate;

	pm_runtime_enable(&pdev->dev);
	return 0;

err1:
	r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
	clk_put(p->clk);
err0:
	iounmap(p->mapbase);
	return ret;
}
static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	const struct atmel_pwm_bl_platform_data *pdata;
	struct backlight_device *bldev;
	struct atmel_pwm_bl *pwmbl;
	int retval;

	pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
	if (!pwmbl)
		return -ENOMEM;

	pwmbl->pdev = pdev;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		retval = -ENODEV;
		goto err_free_mem;
	}

	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
	    pdata->pwm_duty_min > pdata->pwm_duty_max ||
	    pdata->pwm_frequency == 0) {
		retval = -EINVAL;
		goto err_free_mem;
	}

	pwmbl->pdata = pdata;
	pwmbl->gpio_on = pdata->gpio_on;

	retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
	if (retval)
		goto err_free_mem;

	if (pwmbl->gpio_on != -1) {
		retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
		if (retval) {
			pwmbl->gpio_on = -1;
			goto err_free_pwm;
		}

		/* Turn display off by default. */
		retval = gpio_direction_output(pwmbl->gpio_on,
				0 ^ pdata->on_active_low);
		if (retval)
			goto err_free_gpio;
	}

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
					  &atmel_pwm_bl_ops, &props);
	if (IS_ERR(bldev)) {
		retval = PTR_ERR(bldev);
		goto err_free_gpio;
	}

	pwmbl->bldev = bldev;

	platform_set_drvdata(pdev, pwmbl);

	/* Power up the backlight by default at middle intensity. */
	bldev->props.power = FB_BLANK_UNBLANK;
	bldev->props.brightness = bldev->props.max_brightness / 2;

	retval = atmel_pwm_bl_init_pwm(pwmbl);
	if (retval)
		goto err_free_bl_dev;

	atmel_pwm_bl_set_intensity(bldev);

	return 0;

err_free_bl_dev:
	platform_set_drvdata(pdev, NULL);
	backlight_device_unregister(bldev);
err_free_gpio:
	if (pwmbl->gpio_on != -1)
		gpio_free(pwmbl->gpio_on);
err_free_pwm:
	pwm_channel_free(&pwmbl->pwmc);
err_free_mem:
	kfree(pwmbl);
	return retval;
}
static int
nilfs_get_sb(struct file_system_type *fs_type, int flags,
	     const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct nilfs_super_data sd;
	struct super_block *s;
	struct the_nilfs *nilfs;
	int err, need_to_close = 1;

	sd.bdev = open_bdev_exclusive(dev_name, flags, fs_type);
	if (IS_ERR(sd.bdev))
		return PTR_ERR(sd.bdev);

	/*
	 * To get mount instance using sget() vfs-routine, NILFS needs
	 * much more information than normal filesystems to identify mount
	 * instance.  For snapshot mounts, not only a mount type (ro-mount
	 * or rw-mount) but also a checkpoint number is required.
	 */
	sd.cno = 0;
	sd.flags = flags;
	if (nilfs_identify((char *)data, &sd)) {
		err = -EINVAL;
		goto failed;
	}

	nilfs = find_or_create_nilfs(sd.bdev);
	if (!nilfs) {
		err = -ENOMEM;
		goto failed;
	}

	mutex_lock(&nilfs->ns_mount_mutex);

	if (!sd.cno) {
		/*
		 * Check if an exclusive mount exists or not.
		 * Snapshot mounts coexist with a current mount
		 * (i.e. rw-mount or ro-mount), whereas rw-mount and
		 * ro-mount are mutually exclusive.
		 */
		down_read(&nilfs->ns_super_sem);
		if (nilfs->ns_current &&
		    ((nilfs->ns_current->s_super->s_flags ^ flags)
		     & MS_RDONLY)) {
			up_read(&nilfs->ns_super_sem);
			err = -EBUSY;
			goto failed_unlock;
		}
		up_read(&nilfs->ns_super_sem);
	}

	/*
	 * Find existing nilfs_sb_info struct
	 */
	sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);

	/*
	 * Get super block instance holding the nilfs_sb_info struct.
	 * A new instance is allocated if no existing mount is present or
	 * existing instance has been unmounted.
	 */
	s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, &sd);
	if (sd.sbi)
		nilfs_put_sbinfo(sd.sbi);

	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		goto failed_unlock;
	}

	if (!s->s_root) {
		char b[BDEVNAME_SIZE];

		/* New superblock instance created */
		s->s_flags = flags;
		strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(sd.bdev));

		err = nilfs_fill_super(s, data, flags & MS_VERBOSE, nilfs);
		if (err)
			goto cancel_new;

		s->s_flags |= MS_ACTIVE;
		need_to_close = 0;
	}

	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	if (need_to_close)
		close_bdev_exclusive(sd.bdev, flags);
	simple_set_mnt(mnt, s);
	return 0;

failed_unlock:
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
failed:
	close_bdev_exclusive(sd.bdev, flags);

	return err;

cancel_new:
	/* Abandoning the newly allocated superblock */
	mutex_unlock(&nilfs->ns_mount_mutex);
	put_nilfs(nilfs);
	deactivate_locked_super(s);
	/*
	 * deactivate_super() invokes close_bdev_exclusive().
	 * We must finish all post-cleaning before this call;
	 * put_nilfs() needs the block device.
	 */
	return err;
}
/*
 * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
 * 730 has only 2 McBSP, and both of them are MPU peripherals.
 */
int __devinit omap_mcbsp_init(struct platform_device *pdev)
{
	struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
	struct resource *res;
	int ret = 0;

	spin_lock_init(&mcbsp->lock);
	mcbsp->free = true;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
	if (!res) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			dev_err(mcbsp->dev, "invalid memory resource\n");
			return -ENOMEM;
		}
	}
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res),
				     dev_name(&pdev->dev))) {
		dev_err(mcbsp->dev, "memory region already claimed\n");
		return -ENODEV;
	}

	mcbsp->phys_base = res->start;
	mcbsp->reg_cache_size = resource_size(res);
	mcbsp->io_base = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!mcbsp->io_base)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
	if (!res)
		mcbsp->phys_dma_base = mcbsp->phys_base;
	else
		mcbsp->phys_dma_base = res->start;

	/*
	 * OMAP1 and OMAP2 use two interrupt lines: TX, RX
	 * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
	 * OMAP4 and newer SoCs only have the combined IRQ line.
	 * Use the combined IRQ if available since it gives better debugging
	 * possibilities.
	 */
	mcbsp->irq = platform_get_irq_byname(pdev, "common");
	if (mcbsp->irq == -ENXIO) {
		mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");

		if (mcbsp->tx_irq == -ENXIO) {
			mcbsp->irq = platform_get_irq(pdev, 0);
			mcbsp->tx_irq = 0;
		} else {
			mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
			mcbsp->irq = 0;
		}
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!res) {
		dev_err(&pdev->dev, "invalid rx DMA channel\n");
		return -ENODEV;
	}
	/* RX DMA request number, and port address configuration */
	mcbsp->dma_data[1].name = "Audio Capture";
	mcbsp->dma_data[1].dma_req = res->start;
	mcbsp->dma_data[1].port_addr = omap_mcbsp_dma_reg_params(mcbsp, 1);

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!res) {
		dev_err(&pdev->dev, "invalid tx DMA channel\n");
		return -ENODEV;
	}
	/* TX DMA request number, and port address configuration */
	mcbsp->dma_data[0].name = "Audio Playback";
	mcbsp->dma_data[0].dma_req = res->start;
	mcbsp->dma_data[0].port_addr = omap_mcbsp_dma_reg_params(mcbsp, 0);

	mcbsp->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(mcbsp->fclk)) {
		ret = PTR_ERR(mcbsp->fclk);
		dev_err(mcbsp->dev, "unable to get fck: %d\n", ret);
		return ret;
	}

	mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
	if (mcbsp->pdata->buffer_size) {
		/*
		 * Initially configure the maximum thresholds to a safe value.
		 * The McBSP FIFO usage with these values should not go under
		 * 16 locations.
		 * If the whole FIFO without safety buffer is used, then there
		 * is a possibility that the DMA will not be able to push the
		 * new data on time, causing channel shifts in runtime.
		 */
		mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;
		mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10;

		ret = sysfs_create_group(&mcbsp->dev->kobj,
					 &additional_attr_group);
		if (ret) {
			dev_err(mcbsp->dev,
				"Unable to create additional controls\n");
			goto err_thres;
		}
	} else {
		mcbsp->max_tx_thres = -EINVAL;
		mcbsp->max_rx_thres = -EINVAL;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone");
	if (res) {
		ret = omap_st_add(mcbsp, res);
		if (ret) {
			dev_err(mcbsp->dev,
				"Unable to create sidetone controls\n");
			goto err_st;
		}
	}

	return 0;

err_st:
	if (mcbsp->pdata->buffer_size)
		sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
err_thres:
	clk_put(mcbsp->fclk);
	return ret;
}
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @data: mount options
 * @silent: silent mode flag
 * @nilfs: the_nilfs struct
 *
 * This function is called with nilfs->ns_mount_mutex held, so the
 * recovery process is protected from other simultaneous mounts.
 */
static int
nilfs_fill_super(struct super_block *sb, void *data, int silent,
		 struct the_nilfs *nilfs)
{
	struct nilfs_sb_info *sbi;
	struct inode *root;
	__u64 cno;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	get_nilfs(nilfs);
	sbi->s_nilfs = nilfs;
	sbi->s_super = sb;
	atomic_set(&sbi->s_count, 1);

	err = init_nilfs(nilfs, sbi, (char *)data);
	if (err)
		goto failed_sbi;

	spin_lock_init(&sbi->s_inode_lock);
	INIT_LIST_HEAD(&sbi->s_dirty_files);
	INIT_LIST_HEAD(&sbi->s_list);

	/*
	 * Following initialization is overlapped because
	 * nilfs_sb_info structure has been cleared at the beginning.
	 * But we reserve them to keep our interest and make ready
	 * for the future change.
	 */
	get_random_bytes(&sbi->s_next_generation,
			 sizeof(sbi->s_next_generation));
	spin_lock_init(&sbi->s_next_gen_lock);
	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;

	err = load_nilfs(nilfs, sbi);
	if (err)
		goto failed_sbi;

	cno = nilfs_last_cno(nilfs);

	if (sb->s_flags & MS_RDONLY) {
		if (nilfs_test_opt(sbi, SNAPSHOT)) {
			down_read(&nilfs->ns_segctor_sem);
			err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
						       sbi->s_snapshot_cno);
			up_read(&nilfs->ns_segctor_sem);
			if (err < 0) {
				if (err == -ENOENT)
					err = -EINVAL;
				goto failed_sbi;
			}
			if (!err) {
				printk(KERN_ERR
				       "NILFS: The specified checkpoint is "
				       "not a snapshot "
				       "(checkpoint number=%llu).\n",
				       (unsigned long long)sbi->s_snapshot_cno);
				err = -EINVAL;
				goto failed_sbi;
			}
			cno = sbi->s_snapshot_cno;
		} else
			/* Read-only mount */
			sbi->s_snapshot_cno = cno;
	}

	err = nilfs_attach_checkpoint(sbi, cno);
	if (err) {
		printk(KERN_ERR "NILFS: error loading a checkpoint"
		       " (checkpoint number=%llu).\n",
		       (unsigned long long)cno);
		goto failed_sbi;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		err = nilfs_attach_segment_constructor(sbi);
		if (err)
			goto failed_checkpoint;
	}

	root = nilfs_iget(sb, NILFS_ROOT_INO);
	if (IS_ERR(root)) {
		printk(KERN_ERR "NILFS: get root inode failed\n");
		err = PTR_ERR(root);
		goto failed_segctor;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		printk(KERN_ERR "NILFS: corrupt root inode.\n");
		err = -EINVAL;
		goto failed_segctor;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		printk(KERN_ERR "NILFS: get root dentry failed\n");
		err = -ENOMEM;
		goto failed_segctor;
	}

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sbi);
		up_write(&nilfs->ns_sem);
	}

	down_write(&nilfs->ns_super_sem);
	if (!nilfs_test_opt(sbi, SNAPSHOT))
		nilfs->ns_current = sbi;
	up_write(&nilfs->ns_super_sem);

	return 0;

failed_segctor:
	nilfs_detach_segment_constructor(sbi);

failed_checkpoint:
	nilfs_detach_checkpoint(sbi);

failed_sbi:
	put_nilfs(nilfs);
	sb->s_fs_info = NULL;
	nilfs_put_sbinfo(sbi);
	return err;
}
static int __init tsi108_eth_of_init(void)
{
	struct device_node *np;
	unsigned int i;
	struct platform_device *tsi_eth_dev;
	struct resource res;
	int ret;

	for (np = NULL, i = 0;
	     (np = of_find_compatible_node(np, "network", "tsi-ethernet")) != NULL;
	     i++) {
		struct resource r[2];
		struct device_node *phy;
		hw_info tsi_eth_data;
		unsigned int *id;
		unsigned int *phy_id;
		const void *mac_addr;
		phandle *ph;

		memset(r, 0, sizeof(r));
		memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));

		ret = of_address_to_resource(np, 0, &r[0]);
		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
		    __FUNCTION__, r[0].name, r[0].start, r[0].end);
		if (ret)
			goto err;

		r[1].name = "tx";
		r[1].start = irq_of_parse_and_map(np, 0);
		r[1].end = irq_of_parse_and_map(np, 0);
		r[1].flags = IORESOURCE_IRQ;
		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
		    __FUNCTION__, r[1].name, r[1].start, r[1].end);

		tsi_eth_dev =
		    platform_device_register_simple("tsi-ethernet", i,
						    &r[0], 1);

		if (IS_ERR(tsi_eth_dev)) {
			ret = PTR_ERR(tsi_eth_dev);
			goto err;
		}

		mac_addr = get_property(np, "address", NULL);
		if (mac_addr)
			memcpy(tsi_eth_data.mac_addr, mac_addr, 6);

		ph = (phandle *) get_property(np, "phy-handle", NULL);
		if (!ph) {
			ret = -ENODEV;
			goto unreg;
		}

		phy = of_find_node_by_phandle(*ph);
		if (phy == NULL) {
			ret = -ENODEV;
			goto unreg;
		}

		id = (u32 *) get_property(phy, "reg", NULL);
		phy_id = (u32 *) get_property(phy, "phy-id", NULL);

		ret = of_address_to_resource(phy, 0, &res);
		if (ret) {
			of_node_put(phy);
			goto unreg;
		}
		tsi_eth_data.regs = r[0].start;
		tsi_eth_data.phyregs = res.start;
		tsi_eth_data.phy = *phy_id;
		tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
		of_node_put(phy);

		ret = platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
					       sizeof(hw_info));
		if (ret)
			goto unreg;
	}
	return 0;

unreg:
	platform_device_unregister(tsi_eth_dev);
err:
	return ret;
}
static int lttng_abi_create_event(struct file *channel_file,
				  struct lttng_kernel_event *event_param)
{
	struct lttng_channel *channel = channel_file->private_data;
	int event_fd, ret;
	struct file *event_file;
	void *priv;

	event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_KRETPROBE:
		event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_KPROBE:
		event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_FUNCTION:
		event_param->u.ftrace.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	default:
		break;
	}
	event_fd = lttng_get_unused_fd();
	if (event_fd < 0) {
		ret = event_fd;
		goto fd_error;
	}
	event_file = anon_inode_getfile("[lttng_event]",
					&lttng_event_fops,
					NULL, O_RDWR);
	if (IS_ERR(event_file)) {
		ret = PTR_ERR(event_file);
		goto file_error;
	}
	/* The event holds a reference on the channel */
	if (atomic_long_add_unless(&channel_file->f_count, 1, INT_MAX) == INT_MAX) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
	    || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
		struct lttng_enabler *enabler;

		if (event_param->name[strlen(event_param->name) - 1] == '*') {
			enabler = lttng_enabler_create(LTTNG_ENABLER_WILDCARD,
						       event_param, channel);
		} else {
			enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
						       event_param, channel);
		}
		priv = enabler;
	} else {
		struct lttng_event *event;

		/*
		 * We tolerate no failure path after event creation. It
		 * will stay invariant for the rest of the session.
		 */
		event = lttng_event_create(channel, event_param,
					   NULL, NULL,
					   event_param->instrumentation);
		WARN_ON_ONCE(!event);
		if (IS_ERR(event)) {
			ret = PTR_ERR(event);
			goto event_error;
		}
		priv = event;
	}
	event_file->private_data = priv;
	fd_install(event_fd, event_file);
	return event_fd;

event_error:
	atomic_long_dec(&channel_file->f_count);
refcount_error:
	fput(event_file);
file_error:
	put_unused_fd(event_fd);
fd_error:
	return ret;
}
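
/*
 * Hedged sketch of the idiom used above (names are illustrative, not
 * LTTng API): reserve the fd first, create the struct file second, and
 * only fd_install() as the very last step, because fd_install() makes
 * the file visible to userspace and cannot be undone.
 */
static int example_create_anon_fd(const struct file_operations *fops,
				  void *priv)
{
	int fd, ret;
	struct file *filp;

	fd = get_unused_fd();	/* older kernels; get_unused_fd_flags() today */
	if (fd < 0)
		return fd;
	filp = anon_inode_getfile("[example]", fops, priv, O_RDWR);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		put_unused_fd(fd);
		return ret;
	}
	fd_install(fd, filp);	/* point of no return */
	return fd;
}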
static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct pxa_irda *si;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_1;

	err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_2;

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev) {
		err = -ENOMEM;	/* was silently returning 0 on failure */
		goto err_mem_3;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					    !si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		dev_set_drvdata(&pdev->dev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		if (si->sir_clk && !IS_ERR(si->sir_clk))
			clk_put(si->sir_clk);
		if (si->fir_clk && !IS_ERR(si->fir_clk))
			clk_put(si->fir_clk);
		free_netdev(dev);
err_mem_3:
		release_mem_region(__PREG(FICP), 0x1c);
err_mem_2:
		release_mem_region(__PREG(STUART), 0x24);
	}
err_mem_1:
	return err;
}
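
/*
 * The probe above relies on the usual goto-unwind idiom: resources are
 * released in strict reverse order of acquisition, and the errno is set
 * before every bail-out (the bug fixed above was a goto with err still 0).
 * A minimal self-contained restatement of that shape using real kernel
 * APIs (the function itself is illustrative, not from this driver):
 */
static int example_probe(struct device *dev)
{
	struct clk *c;
	void *buf;
	int err;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;		/* set the errno before bailing out */
	c = clk_get(dev, NULL);
	if (IS_ERR(c)) {
		err = PTR_ERR(c);
		goto free_buf;
	}
	err = clk_enable(c);
	if (err)
		goto put_clk;
	return 0;

put_clk:
	clk_put(c);
free_buf:
	kfree(buf);
	return err;
}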
static int lttng_abi_create_channel(struct file *session_file,
				    struct lttng_kernel_channel *chan_param,
				    enum channel_type channel_type)
{
	struct lttng_session *session = session_file->private_data;
	const struct file_operations *fops = NULL;
	const char *transport_name;
	struct lttng_channel *chan;
	struct file *chan_file;
	int chan_fd;
	int ret = 0;

	chan_fd = lttng_get_unused_fd();
	if (chan_fd < 0) {
		ret = chan_fd;
		goto fd_error;
	}
	switch (channel_type) {
	case PER_CPU_CHANNEL:
		fops = &lttng_channel_fops;
		break;
	case METADATA_CHANNEL:
		fops = &lttng_metadata_fops;
		break;
	}

	chan_file = anon_inode_getfile("[lttng_channel]",
				       fops,
				       NULL, O_RDWR);
	if (IS_ERR(chan_file)) {
		ret = PTR_ERR(chan_file);
		goto file_error;
	}
	switch (channel_type) {
	case PER_CPU_CHANNEL:
		if (chan_param->output == LTTNG_KERNEL_SPLICE) {
			transport_name = chan_param->overwrite ?
				"relay-overwrite" : "relay-discard";
		} else if (chan_param->output == LTTNG_KERNEL_MMAP) {
			transport_name = chan_param->overwrite ?
				"relay-overwrite-mmap" : "relay-discard-mmap";
		} else {
			/* was "return -EINVAL;", leaking chan_fd and chan_file */
			ret = -EINVAL;
			goto refcount_error;
		}
		break;
	case METADATA_CHANNEL:
		if (chan_param->output == LTTNG_KERNEL_SPLICE) {
			transport_name = "relay-metadata";
		} else if (chan_param->output == LTTNG_KERNEL_MMAP) {
			transport_name = "relay-metadata-mmap";
		} else {
			ret = -EINVAL;
			goto refcount_error;
		}
		break;
	default:
		transport_name = "<unknown>";
		break;
	}
	if (atomic_long_add_unless(&session_file->f_count, 1, INT_MAX) == INT_MAX) {
		ret = -EOVERFLOW;	/* was falling through with ret == 0 */
		goto refcount_error;
	}
	/*
	 * We tolerate no failure path after channel creation. It will stay
	 * invariant for the rest of the session.
	 */
	chan = lttng_channel_create(session, transport_name, NULL,
				    chan_param->subbuf_size,
				    chan_param->num_subbuf,
				    chan_param->switch_timer_interval,
				    chan_param->read_timer_interval,
				    channel_type);
	if (!chan) {
		ret = -EINVAL;
		goto chan_error;
	}
	chan->file = chan_file;
	chan_file->private_data = chan;
	fd_install(chan_fd, chan_file);

	return chan_fd;

chan_error:
	atomic_long_dec(&session_file->f_count);
refcount_error:
	fput(chan_file);
file_error:
	put_unused_fd(chan_fd);
fd_error:
	return ret;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
			 struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}
		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 of JEDEC Standard JESD84-A441;
		 * the OCR register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * This bit is lost after a power off or reset, so clear it here.
	 */
	card->ext_csd.erase_group_def = 0;

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
 */
	if (card->ext_csd.enhanced_area_en ||
	    card->ext_csd.part_set_complete ||
	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0, 1);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable the enhanced area offset and size;
			 * we will try to enable ERASE_GROUP_DEF again on
			 * the next reinit.
			 */
			card->enhanced_area_offset = 0;
			card->enhanced_area_size = 0;
		} else {
			card->ext_csd.erase_group_def = 1;
			card->enhanced_area_offset =
				card->ext_csd.enhanced_area_offset;
			card->enhanced_area_size =
				card->ext_csd.enhanced_area_size;
			/*
			 * ERASE_GROUP_DEF was enabled successfully; this
			 * changes the erase size, so recompute it here.
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure the eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time, 1);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * If the host supports the power_off_notify capability, set the
	 * notification byte in the EXT_CSD register of the device.
	 */
	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
	    (card->ext_csd.rev >= 6)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * err here is either -EBADMSG or 0, so check for
		 * success before updating the flag.
		 */
		if (!err)
			card->poweroff_notify_state = MMC_POWERED_ON;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
	    (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HPI_MGMT, 1, 0, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) &&
		    ((host->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)) ==
		     (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) &&
			 ((host->caps & (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)) ==
			  (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
*/ if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { static unsigned ext_csd_bits[][2] = { { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 }, { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 }, }; static unsigned bus_widths[] = { MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4, MMC_BUS_WIDTH_1 }; unsigned idx, bus_width = 0; if (host->caps & MMC_CAP_8_BIT_DATA) idx = 0; else idx = 1; for (; idx < ARRAY_SIZE(bus_widths); idx++) { bus_width = bus_widths[idx]; if (bus_width == MMC_BUS_WIDTH_1) ddr = 0; /* no DDR for 1-bit width */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, ext_csd_bits[idx][0], 0, 1); if (!err) { mmc_set_bus_width(card->host, bus_width); /* * If controller can't handle bus width test, * compare ext_csd previously read in 1 bit mode * against ext_csd at new bus width */ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) err = mmc_compare_ext_csds(card, bus_width); else err = mmc_bus_test(card, bus_width); if (!err) break; } } if (!err && ddr) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, ext_csd_bits[idx][1], 0, 1); } if (err) { printk(KERN_WARNING "%s: switch to bus width %d ddr %d " "failed\n", mmc_hostname(card->host), 1 << bus_width, ddr); goto free_card; } else if (ddr) { /* * eMMC cards can support 3.3V to 1.2V i/o (vccq) * signaling. * * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. * * 1.8V vccq at 3.3V core voltage (vcc) is not required * in the JEDEC spec for DDR. * * Do not force change in vccq since we are obviously * working and no change to vccq is needed. * * WARNING: eMMC rules are NOT the same as SD DDR */ if (ddr == MMC_1_2V_DDR_MODE) { err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0); if (err) goto err; } mmc_card_set_ddr_mode(card); mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); mmc_set_bus_width(card->host, bus_width); } } if (!oldcard) host->card = card; mmc_free_ext_csd(ext_csd); return 0; free_card: if (!oldcard) mmc_remove_card(card); err: mmc_free_ext_csd(ext_csd); return err; }
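
/*
 * Hedged sketch of the mmc_switch() error convention used throughout
 * mmc_init_card() above: -EBADMSG means the card rejected this particular
 * mode switch but is otherwise usable, so callers degrade gracefully
 * instead of failing the whole init.  The wrapper below is illustrative
 * only, and the trailing two arguments follow this tree's six-argument
 * mmc_switch() variant (timeout and a check flag, an assumption here).
 */
static int example_try_ext_csd_switch(struct mmc_card *card, u8 index, u8 value)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, index, value, 0, 1);
	if (err && err != -EBADMSG)
		return err;		/* transport-level failure: abort */
	return err ? 1 : 0;		/* 1 = card refused, caller degrades */
}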
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; int ret; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = *mmc_dev(host)->dma_mask; mq->card = card; mq->queue = blk_init_queue(mmc_request, lock); if (!mq->queue) return -ENOMEM; mq->queue->queuedata = mq; mq->req = NULL; blk_queue_prep_rq(mq->queue, mmc_prep_request); blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_hw_segs == 1) { unsigned int bouncesz; bouncesz = MMC_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512) { mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); if (!mq->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce buffer\n", mmc_card_name(card)); } } if (mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_sectors(mq->queue, bouncesz / 512); blk_queue_max_phys_segments(mq->queue, bouncesz / 512); blk_queue_max_hw_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, 1); mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * bouncesz / 512, GFP_KERNEL); if (!mq->bounce_sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->bounce_sg, bouncesz / 512); } } #endif if (!mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, host->max_phys_segs); } init_MUTEX(&mq->thread_sem); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd"); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto free_bounce_sg; } return 0; free_bounce_sg: if (mq->bounce_sg) kfree(mq->bounce_sg); mq->bounce_sg = NULL; cleanup_queue: if (mq->sg) kfree(mq->sg); mq->sg = NULL; if (mq->bounce_buf) kfree(mq->bounce_buf); mq->bounce_buf = NULL; blk_cleanup_queue(mq->queue); return ret; }
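
/*
 * The bounce-buffer size chosen above is simply the tightest of four
 * independent limits.  A restatement of the same arithmetic from the
 * CONFIG_MMC_BLOCK_BOUNCE block, factored into a helper for clarity
 * (the helper itself is illustrative, not part of the driver):
 */
static unsigned int example_bouncesz(struct mmc_host *host)
{
	unsigned int sz = MMC_QUEUE_BOUNCESZ;

	sz = min(sz, host->max_req_size);
	sz = min(sz, host->max_seg_size);
	sz = min(sz, host->max_blk_count * 512);	/* 512-byte blocks */
	return sz;
}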
static int __init msm_otg_probe(struct platform_device *pdev) { int ret = 0; int vbus_on_irq = 0; struct resource *res; struct msm_otg *dev; struct msm_otg_platform_data *pdata; dev = kzalloc(sizeof(struct msm_otg), GFP_KERNEL); if (!dev) return -ENOMEM; dev->otg.dev = &pdev->dev; pdata = pdev->dev.platform_data; if (pdev->dev.platform_data) { dev->rpc_connect = pdata->rpc_connect; dev->phy_reset = pdata->phy_reset; dev->core_clk = pdata->core_clk; /* pmic apis */ dev->pmic_notif_init = pdata->pmic_notif_init; dev->pmic_notif_deinit = pdata->pmic_notif_deinit; dev->pmic_register_vbus_sn = pdata->pmic_register_vbus_sn; dev->pmic_unregister_vbus_sn = pdata->pmic_unregister_vbus_sn; dev->pmic_enable_ldo = pdata->pmic_enable_ldo; } if (pdata && pdata->pmic_vbus_irq) { vbus_on_irq = platform_get_irq_byname(pdev, "vbus_on"); if (vbus_on_irq < 0) { pr_err("%s: unable to get vbus on irq\n", __func__); ret = vbus_on_irq; goto free_dev; } } if (dev->rpc_connect) { ret = dev->rpc_connect(1); pr_info("%s: rpc_connect(%d)\n", __func__, ret); if (ret) { pr_err("%s: rpc connect failed\n", __func__); ret = -ENODEV; goto free_dev; } } dev->clk = clk_get(&pdev->dev, "usb_hs_clk"); if (IS_ERR(dev->clk)) { pr_err("%s: failed to get usb_hs_clk\n", __func__); ret = PTR_ERR(dev->clk); goto rpc_fail; } dev->pclk = clk_get(&pdev->dev, "usb_hs_pclk"); if (IS_ERR(dev->pclk)) { pr_err("%s: failed to get usb_hs_pclk\n", __func__); ret = PTR_ERR(dev->pclk); goto put_clk; } if (dev->core_clk) { dev->cclk = clk_get(&pdev->dev, "usb_hs_core_clk"); if (IS_ERR(dev->cclk)) { pr_err("%s: failed to get usb_hs_core_clk\n", __func__); ret = PTR_ERR(dev->cclk); goto put_pclk; } } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { pr_err("%s: failed to get platform resource mem\n", __func__); ret = -ENODEV; goto put_cclk; } dev->regs = ioremap(res->start, resource_size(res)); if (!dev->regs) { pr_err("%s: ioremap failed\n", __func__); ret = -ENOMEM; goto put_cclk; } dev->irq = platform_get_irq(pdev, 0); if (!dev->irq) { pr_err("%s: platform_get_irq failed\n", __func__); ret = -ENODEV; goto free_regs; } /* enable clocks */ clk_enable(dev->pclk); if (dev->cclk) clk_enable(dev->cclk); /* To reduce phy power consumption and to avoid external LDO * on the board, PMIC comparators can be used to detect VBUS * session change. 
 */
	if (dev->pmic_notif_init) {
		ret = dev->pmic_notif_init();
		if (!ret) {
			dev->pmic_notif_supp = 1;
			dev->pmic_enable_ldo(1);
		} else if (ret != -ENOTSUPP) {
			clk_disable(dev->pclk);
			if (dev->cclk)
				clk_disable(dev->cclk);
			goto free_regs;
		}
	}

	otg_reset(dev);

	ret = request_irq(dev->irq, msm_otg_irq, IRQF_SHARED,
			  "msm_otg", dev);
	if (ret) {
		pr_info("%s: request irq failed\n", __func__);
		clk_disable(dev->pclk);
		if (dev->cclk)
			clk_disable(dev->cclk);
		goto free_regs;
	}

	the_msm_otg = dev;
	dev->vbus_on_irq = vbus_on_irq;
	dev->otg.set_peripheral = msm_otg_set_peripheral;
	dev->otg.set_host = msm_otg_set_host;
	dev->otg.set_suspend = msm_otg_set_suspend;
	dev->set_clk = msm_otg_set_clk;

	ret = otg_set_transceiver(&dev->otg);
	if (ret) {
		/* was discarding the error and returning 0 */
		WARN_ON(1);
		goto free_otg_irq;
	}

	device_init_wakeup(&pdev->dev, 1);

	if (vbus_on_irq) {
		ret = request_irq(vbus_on_irq, pmic_vbus_on_irq,
				  IRQF_TRIGGER_RISING, "msm_otg_vbus_on", NULL);
		if (ret) {
			pr_info("%s: request_irq for vbus_on interrupt failed\n",
				__func__);
			goto free_otg_irq;
		}
	}

	return 0;

free_otg_irq:
	free_irq(dev->irq, dev);
	clk_disable(dev->pclk);		/* undo the enables done above */
	if (dev->cclk)
		clk_disable(dev->cclk);
free_regs:
	iounmap(dev->regs);
put_cclk:
	if (dev->cclk)
		clk_put(dev->cclk);
put_pclk:
	clk_put(dev->pclk);
put_clk:
	clk_put(dev->clk);
rpc_fail:
	if (dev->rpc_connect)	/* may legitimately be NULL */
		dev->rpc_connect(0);
free_dev:
	kfree(dev);
	return ret;
}
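
/*
 * Hedged sketch of the VBUS wakeup hookup above (the handler body is
 * illustrative, not the MSM driver's): a rising-edge PMIC interrupt is
 * registered so that a VBUS session change can kick the OTG state
 * machine; the hard-irq handler should stay short and defer real work.
 */
static irqreturn_t example_vbus_on_irq(int irq, void *data)
{
	/* schedule work elsewhere; keep hard-irq context minimal */
	return IRQ_HANDLED;
}

static int example_hook_vbus(int vbus_irq)
{
	return request_irq(vbus_irq, example_vbus_on_irq,
			   IRQF_TRIGGER_RISING, "example_vbus_on", NULL);
}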
int efx_lm87_probe(struct i2c_client *new_client, const struct i2c_device_id *id) #endif { struct lm87_data *data; int err; data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); /* Initialize the LM87 chip */ lm87_init_client(new_client); data->in_scale[0] = 2500; data->in_scale[1] = 2700; data->in_scale[2] = (data->channel & CHAN_VCC_5V) ? 5000 : 3300; data->in_scale[3] = 5000; data->in_scale[4] = 12000; data->in_scale[5] = 2700; data->in_scale[6] = 1875; data->in_scale[7] = 1875; /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm87_group))) goto exit_free; if (data->channel & CHAN_NO_FAN(0)) { if ((err = device_create_file(&new_client->dev, &dev_attr_in6_input)) || (err = device_create_file(&new_client->dev, &dev_attr_in6_min)) || (err = device_create_file(&new_client->dev, &dev_attr_in6_max)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_in6_alarm.dev_attr))) goto exit_remove; } else { if ((err = device_create_file(&new_client->dev, &dev_attr_fan1_input)) || (err = device_create_file(&new_client->dev, &dev_attr_fan1_min)) || (err = device_create_file(&new_client->dev, &dev_attr_fan1_div)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_fan1_alarm.dev_attr))) goto exit_remove; } if (data->channel & CHAN_NO_FAN(1)) { if ((err = device_create_file(&new_client->dev, &dev_attr_in7_input)) || (err = device_create_file(&new_client->dev, &dev_attr_in7_min)) || (err = device_create_file(&new_client->dev, &dev_attr_in7_max)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_in7_alarm.dev_attr))) goto exit_remove; } else { if ((err = device_create_file(&new_client->dev, &dev_attr_fan2_input)) || (err = device_create_file(&new_client->dev, &dev_attr_fan2_min)) || (err = device_create_file(&new_client->dev, &dev_attr_fan2_div)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_fan2_alarm.dev_attr))) goto exit_remove; } if (data->channel & CHAN_TEMP3) { if ((err = device_create_file(&new_client->dev, &dev_attr_temp3_input)) || (err = device_create_file(&new_client->dev, &dev_attr_temp3_max)) || (err = device_create_file(&new_client->dev, &dev_attr_temp3_min)) || (err = device_create_file(&new_client->dev, &dev_attr_temp3_crit)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_temp3_alarm.dev_attr)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_temp3_fault.dev_attr))) goto exit_remove; } else { if ((err = device_create_file(&new_client->dev, &dev_attr_in0_input)) || (err = device_create_file(&new_client->dev, &dev_attr_in0_min)) || (err = device_create_file(&new_client->dev, &dev_attr_in0_max)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_in0_alarm.dev_attr)) || (err = device_create_file(&new_client->dev, &dev_attr_in5_input)) || (err = device_create_file(&new_client->dev, &dev_attr_in5_min)) || (err = device_create_file(&new_client->dev, &dev_attr_in5_max)) || (err = device_create_file(&new_client->dev, &sensor_dev_attr_in5_alarm.dev_attr))) goto exit_remove; } if (!(data->channel & CHAN_NO_VID)) { data->vrm = vid_which_vrm(); if ((err = device_create_file(&new_client->dev, &dev_attr_cpu0_vid)) || (err = device_create_file(&new_client->dev, &dev_attr_vrm))) goto exit_remove; } data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; 
exit_remove: sysfs_remove_group(&new_client->dev.kobj, &lm87_group); sysfs_remove_group(&new_client->dev.kobj, &lm87_group_opt); exit_free: lm87_write_value(new_client, LM87_REG_CONFIG, data->config); kfree(data); exit: return err; }
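
/*
 * Hedged alternative to the long device_create_file() chains above:
 * group related attributes into a struct attribute_group and register
 * them in one sysfs_create_group() call, which also gives a single
 * matching sysfs_remove_group() on the error path.  Illustrative only
 * (many hwmon drivers migrated this way in later kernels); the
 * dev_attr_fan1_* attributes are the ones already defined by this driver.
 */
static struct attribute *example_fan1_attrs[] = {
	&dev_attr_fan1_input.attr,
	&dev_attr_fan1_min.attr,
	&dev_attr_fan1_div.attr,
	NULL
};

static const struct attribute_group example_fan1_group = {
	.attrs = example_fan1_attrs,
};

/* usage: err = sysfs_create_group(&new_client->dev.kobj, &example_fan1_group); */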
static int __devinit tegra_sdhci_probe(struct platform_device *pdev)
{
	int rc;
	struct tegra_sdhci_platform_data *plat;
	struct sdhci_host *sdhci;
	struct tegra_sdhci_host *host;
	struct resource *res;
	int irq;
	void __iomem *ioaddr;

	plat = pdev->dev.platform_data;
	if (plat == NULL)
		return -ENXIO;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL)
		return -ENODEV;
	irq = res->start;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENODEV;
	/* resource ranges are inclusive: use resource_size(), and check
	 * that the mapping actually succeeded before touching it */
	ioaddr = ioremap(res->start, resource_size(res));
	if (!ioaddr)
		return -ENOMEM;

	sdhci = sdhci_alloc_host(&pdev->dev, sizeof(struct tegra_sdhci_host));
	if (IS_ERR(sdhci)) {
		rc = PTR_ERR(sdhci);
		goto err_unmap;
	}

	host = sdhci_priv(sdhci);
	host->sdhci = sdhci;
	host->card_always_on = (plat->power_gpio == -1) ? 1 : 0;
	host->wp_gpio = plat->wp_gpio;

	host->clk = clk_get(&pdev->dev, plat->clk_id);
	if (IS_ERR(host->clk)) {
		rc = PTR_ERR(host->clk);
		goto err_free_host;
	}

	rc = clk_enable(host->clk);
	if (rc != 0)
		goto err_clkput;
	host->clk_enabled = 1;

	sdhci->hw_name = "tegra";
	sdhci->ops = &tegra_sdhci_ops;
	sdhci->irq = irq;
	sdhci->ioaddr = ioaddr;
	sdhci->version = SDHCI_SPEC_200;
	sdhci->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
			SDHCI_QUIRK_SINGLE_POWER_WRITE |
			SDHCI_QUIRK_ENABLE_INTERRUPT_AT_BLOCK_GAP |
			SDHCI_QUIRK_BROKEN_WRITE_PROTECT |
			SDHCI_QUIRK_BROKEN_CTRL_HISPD |
			SDHCI_QUIRK_NO_HISPD_BIT |
			SDHCI_QUIRK_8_BIT_DATA |
			SDHCI_QUIRK_NO_VERSION_REG |
			SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
			SDHCI_QUIRK_RUNTIME_DISABLE;

	if (plat->force_hs != 0)
		sdhci->quirks |= SDHCI_QUIRK_FORCE_HIGH_SPEED_MODE;

#ifdef CONFIG_MMC_EMBEDDED_SDIO
	mmc_set_embedded_sdio_data(sdhci->mmc, &plat->cis, &plat->cccr,
				   plat->funcs, plat->num_funcs);
#endif

	if (host->card_always_on)
		sdhci->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;

	rc = sdhci_add_host(sdhci);
	if (rc)
		goto err_clk_disable;

	platform_set_drvdata(pdev, host);

	if (plat->cd_gpio != -1) {
		rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				 mmc_hostname(sdhci->mmc), sdhci);
		if (rc)
			goto err_remove_host;
	} else if (plat->register_status_notify) {
		plat->register_status_notify(tegra_sdhci_status_notify_cb, sdhci);
	}

	if (plat->board_probe)
		plat->board_probe(pdev->id, sdhci->mmc);

	printk(KERN_INFO "sdhci%d: initialized irq %d ioaddr %p\n",
	       pdev->id, sdhci->irq, sdhci->ioaddr);
	return 0;

err_remove_host:
	sdhci_remove_host(sdhci, 1);
err_clk_disable:
	clk_disable(host->clk);
err_clkput:
	clk_put(host->clk);
err_free_host:
	sdhci_free_host(sdhci);
err_unmap:
	/* use the local pointer: sdhci may be an ERR_PTR here */
	iounmap(ioaddr);
	return rc;
}
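
/*
 * Note on the mapping length fixed above: struct resource ranges are
 * inclusive, so the length is end - start + 1.  That is exactly what
 * the upstream resource_size() helper in <linux/ioport.h> computes:
 */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}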
int btrfs_csum_file_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 alloc_end, u64 bytenr, char *data, size_t len) { int ret = 0; struct btrfs_key file_key; struct btrfs_key found_key; u64 next_offset = (u64)-1; int found_next = 0; struct btrfs_path *path; struct btrfs_csum_item *item; struct extent_buffer *leaf = NULL; u64 csum_offset; u32 csum_result = ~(u32)0; u32 nritems; u32 ins_size; u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); path = btrfs_alloc_path(); BUG_ON(!path); file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.offset = bytenr; file_key.type = BTRFS_EXTENT_CSUM_KEY; item = btrfs_lookup_csum(trans, root, path, bytenr, 1); if (!IS_ERR(item)) { leaf = path->nodes[0]; ret = 0; goto found; } ret = PTR_ERR(item); if (ret == -EFBIG) { u32 item_size; /* we found one, but it isn't big enough yet */ leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); if ((item_size / csum_size) >= MAX_CSUM_ITEMS(root, csum_size)) { /* already at max size, make a new one */ goto insert; } } else { int slot = path->slots[0] + 1; /* we didn't find a csum item, insert one */ nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems - 1) { ret = btrfs_next_leaf(root, path); if (ret == 1) found_next = 1; if (ret != 0) goto insert; slot = 0; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || found_key.type != BTRFS_EXTENT_CSUM_KEY) { found_next = 1; goto insert; } next_offset = found_key.offset; found_next = 1; goto insert; } /* * at this point, we know the tree has an item, but it isn't big * enough yet to put our csum in. Grow it */ btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) goto fail; if (ret == 0) { BUG(); } if (path->slots[0] == 0) { goto insert; } path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); csum_offset = (file_key.offset - found_key.offset) / root->sectorsize; if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || found_key.type != BTRFS_EXTENT_CSUM_KEY || csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) { goto insert; } if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) / csum_size) { u32 diff = (csum_offset + 1) * csum_size; diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); if (diff != csum_size) goto insert; ret = btrfs_extend_item(trans, root, path, diff); BUG_ON(ret); goto csum; } insert: btrfs_release_path(path); csum_offset = 0; if (found_next) { u64 tmp = min(alloc_end, next_offset); tmp -= file_key.offset; tmp /= root->sectorsize; tmp = max((u64)1, tmp); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); ins_size = csum_size * tmp; } else { ins_size = csum_size; } ret = btrfs_insert_empty_item(trans, root, path, &file_key, ins_size); if (ret < 0) goto fail; if (ret != 0) { WARN_ON(1); goto fail; } csum: leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); ret = 0; item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * csum_size); found: csum_result = btrfs_csum_data(root, data, csum_result, len); btrfs_csum_final(csum_result, (char *)&csum_result); if (csum_result == 0) { printk("csum result is 0 for block %llu\n", (unsigned long long)bytenr); } write_extent_buffer(leaf, &csum_result, (unsigned long)item, csum_size); btrfs_mark_buffer_dirty(path->nodes[0]); fail: btrfs_release_path(path); btrfs_free_path(path); return ret; }
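
/*
 * Hedged sketch: in kernels of this era btrfs_csum_data() reduces to
 * crc32c over the data block and btrfs_csum_final() folds the result
 * into its on-disk (inverted) form.  A minimal stand-alone equivalent
 * using the kernel's crc32c library (the helper name is illustrative,
 * and CONFIG_LIBCRC32C is assumed to be enabled):
 */
#include <linux/crc32c.h>

static u32 example_csum_block(const char *data, size_t len)
{
	u32 crc = ~(u32)0;		/* same seed as csum_result above */

	crc = crc32c(crc, data, len);
	return ~crc;			/* final inversion, as in csum_final */
}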