/*
 * Register a new MMC card with the driver model.
 *
 * Names the device "<host>:<rca>", logs a one-line summary of the card's
 * type and bus speed, optionally creates debugfs entries, and adds the
 * device to the driver core.  Returns 0 on success or the device_add()
 * error code; the card is only marked present after a successful add.
 */
int mmc_add_card(struct mmc_card *card)
{
	int ret;
	const char *type;
	const char *uhs_bus_speed_mode = "";
	/* Printable names for the SD UHS bus speed modes, indexed by
	 * card->sd_bus_speed.  Trailing spaces keep the pr_info() format
	 * strings below readable when a mode string is present. */
	static const char *const uhs_speeds[] = {
		[UHS_SDR12_BUS_SPEED] = "SDR12 ",
		[UHS_SDR25_BUS_SPEED] = "SDR25 ",
		[UHS_SDR50_BUS_SPEED] = "SDR50 ",
		[UHS_SDR104_BUS_SPEED] = "SDR104 ",
		[UHS_DDR50_BUS_SPEED] = "DDR50 ",
	};

	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);

	/* Map the card type to a printable label; block-addressed SD cards
	 * are reported as SDHC/SDXC depending on extended capacity. */
	switch (card->type) {
	case MMC_TYPE_MMC:
		type = "MMC";
		break;
	case MMC_TYPE_SD:
		type = "SD";
		if (mmc_card_blockaddr(card)) {
			if (mmc_card_ext_capacity(card))
				type = "SDXC";
			else
				type = "SDHC";
		}
		break;
	case MMC_TYPE_SDIO:
		type = "SDIO";
		break;
	case MMC_TYPE_SD_COMBO:
		type = "SD-combo";
		if (mmc_card_blockaddr(card))
			type = "SDHC-combo";
		break;
	default:
		type = "?";
		break;
	}

	/* Bounds check guards the table lookup against speed values the
	 * table does not name; the mode string then stays "". */
	if (mmc_sd_card_uhs(card) &&
		(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
		uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];

	if (mmc_host_is_spi(card->host)) {
		pr_info("%s: new %s%s%s card on SPI\n",
			mmc_hostname(card->host),
			mmc_card_highspeed(card) ? "high speed " : "",
			mmc_card_ddr_mode(card) ? "DDR " : "",
			type);
	} else {
		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
			mmc_hostname(card->host),
			mmc_card_uhs(card) ? "ultra high speed " :
			(mmc_card_highspeed(card) ? "high speed " : ""),
			(mmc_card_hs200(card) ? "HS200 " : ""),
			mmc_card_ddr_mode(card) ? "DDR " : "",
			uhs_bus_speed_mode, type, card->rca);
	}

#ifdef CONFIG_DEBUG_FS
	mmc_add_card_debugfs(card);
#endif
	mmc_init_context_info(card->host);

	ret = device_add(&card->dev);
	if (ret)
		return ret;

	/* Only mark the card present once it is visible to the driver core. */
	mmc_card_set_present(card);

	return 0;
}
/******************************************************************************
 * module init/exit
 *****************************************************************************/
/*
 * Module init: verify we run under z/VM, register with IUCV, create the
 * bus device, validate and load the *MONITOR DCSS, and finally expose the
 * misc character device.  Unwinds in reverse order on failure.
 */
static int __init mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM) {
		pr_err("The z/VM *MONITOR record device driver cannot be "
			"loaded without z/VM\n");
		return -ENODEV;
	}

	/*
	 * Register with IUCV and connect to *MONITOR service
	 */
	rc = iucv_register(&monreader_iucv_handler, 1);
	if (rc) {
		pr_err("The z/VM *MONITOR record device driver failed to "
			"register with IUCV\n");
		return rc;
	}

	rc = driver_register(&monreader_driver);
	if (rc)
		goto out_iucv;
	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!monreader_device) {
		/* Fix: rc was previously left at 0 here (the value from the
		 * successful driver_register), so an allocation failure was
		 * reported as success to the module loader. */
		rc = -ENOMEM;
		goto out_driver;
	}

	dev_set_name(monreader_device, "monreader-dev");
	monreader_device->bus = &iucv_bus;
	monreader_device->parent = iucv_root;
	monreader_device->driver = &monreader_driver;
	/* kfree doubles as the release callback; the device was kzalloc'd. */
	monreader_device->release = (void (*)(struct device *))kfree;

	rc = device_register(monreader_device);
	if (rc) {
		/* device_register failed: drop the reference; release() frees. */
		put_device(monreader_device);
		goto out_driver;
	}

	/* The *MONITOR DCSS must exist and be of shared-code (SC) type. */
	rc = segment_type(mon_dcss_name);
	if (rc < 0) {
		segment_warning(rc, mon_dcss_name);
		goto out_device;
	}
	if (rc != SEG_TYPE_SC) {
		pr_err("The specified *MONITOR DCSS %s does not have the "
			"required type SC\n", mon_dcss_name);
		rc = -EINVAL;
		goto out_device;
	}

	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
			  &mon_dcss_start, &mon_dcss_end);
	if (rc < 0) {
		segment_warning(rc, mon_dcss_name);
		rc = -EINVAL;
		goto out_device;
	}
	dcss_mkname(mon_dcss_name, &user_data_connect[8]);

	/*
	 * misc_register() has to be the last action in module_init(), because
	 * file operations will be available right after this.
	 */
	rc = misc_register(&mon_dev);
	if (rc < 0)
		goto out;
	return 0;

out:
	segment_unload(mon_dcss_name);
out_device:
	device_unregister(monreader_device);
out_driver:
	driver_unregister(&monreader_driver);
out_iucv:
	iucv_unregister(&monreader_iucv_handler, 1);
	return rc;
}
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc, struct device_type *dev_type, const char *caller) { struct nd_region *nd_region; struct device *dev; void *region_buf; unsigned int i; int ro = 0; for (i = 0; i < ndr_desc->num_mappings; i++) { struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; if ((nd_mapping->start | nd_mapping->size) % SZ_4K) { dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n", caller, dev_name(&nvdimm->dev), i); return NULL; } if (nvdimm->flags & NDD_UNARMED) ro = 1; } if (dev_type == &nd_blk_device_type) { struct nd_blk_region_desc *ndbr_desc; struct nd_blk_region *ndbr; ndbr_desc = to_blk_region_desc(ndr_desc); ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping) * ndr_desc->num_mappings, GFP_KERNEL); if (ndbr) { nd_region = &ndbr->nd_region; ndbr->enable = ndbr_desc->enable; ndbr->disable = ndbr_desc->disable; ndbr->do_io = ndbr_desc->do_io; } region_buf = ndbr; } else { nd_region = kzalloc(sizeof(struct nd_region) + sizeof(struct nd_mapping) * ndr_desc->num_mappings, GFP_KERNEL); region_buf = nd_region; } if (!region_buf) return NULL; nd_region->id = ida_simple_get(®ion_ida, 0, 0, GFP_KERNEL); if (nd_region->id < 0) goto err_id; nd_region->lane = alloc_percpu(struct nd_percpu_lane); if (!nd_region->lane) goto err_percpu; for (i = 0; i < nr_cpu_ids; i++) { struct nd_percpu_lane *ndl; ndl = per_cpu_ptr(nd_region->lane, i); spin_lock_init(&ndl->lock); ndl->count = 0; } memcpy(nd_region->mapping, ndr_desc->nd_mapping, sizeof(struct nd_mapping) * ndr_desc->num_mappings); for (i = 0; i < ndr_desc->num_mappings; i++) { struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; get_device(&nvdimm->dev); } nd_region->ndr_mappings = ndr_desc->num_mappings; nd_region->provider_data = ndr_desc->provider_data; nd_region->nd_set = ndr_desc->nd_set; nd_region->num_lanes = 
ndr_desc->num_lanes; nd_region->flags = ndr_desc->flags; nd_region->ro = ro; nd_region->numa_node = ndr_desc->numa_node; ida_init(&nd_region->ns_ida); ida_init(&nd_region->btt_ida); ida_init(&nd_region->pfn_ida); dev = &nd_region->dev; dev_set_name(dev, "region%d", nd_region->id); dev->parent = &nvdimm_bus->dev; dev->type = dev_type; dev->groups = ndr_desc->attr_groups; nd_region->ndr_size = resource_size(ndr_desc->res); nd_region->ndr_start = ndr_desc->res->start; nd_device_register(dev); return nd_region; err_percpu: ida_simple_remove(®ion_ida, nd_region->id); err_id: kfree(region_buf); return NULL; }
/*
 * Module init: bring up the architecture-specific XPC layer (sn2 or uv),
 * set up partition structures and the reserved page, register reboot/die
 * notifiers, start the heartbeat-checker and discovery kthreads, and
 * publish the XPC interface.  Error paths unwind in reverse order via the
 * out_* labels; the discovery-thread failure instead delegates a full
 * teardown to xpc_do_exit().
 */
int __init xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}
	} else if (is_uv()) {
		ret = xpc_init_uv();
	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	/* Sysctl registration failure is tolerated; xpc_sysctl stays NULL. */
	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list; failure is non-fatal */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list; failure is non-fatal */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL.  This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		/* hb-checker is already running, so full teardown is needed */
		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	/* notifiers may not have registered above; unregistering anyway
	 * mirrors the original flow since those failures were only warned */
	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}
/*
 * macio_add_one_device() - create and register a macio_dev for an OF node.
 *
 * Allocates the device, wires up DMA masks/ops (borrowed from the parent
 * PCI device when CONFIG_PCI), derives the device name from the node's
 * "reg" property (or the chip's PCI BAR for the MacIO node itself), sets
 * up interrupts and resources, and registers with the OF device core.
 * Returns the new device, or NULL on any failure.
 */
static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
					       struct device *parent,
					       struct device_node *np,
					       struct macio_dev *in_bay,
					       struct resource *parent_res)
{
	struct macio_dev *dev;
	const u32 *reg;

	if (np == NULL)
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->bus = &chip->lbus;
	dev->media_bay = in_bay;
	dev->ofdev.dev.of_node = np;
	/* 32-bit DMA mask stored in archdata; dev.dma_mask points at it. */
	dev->ofdev.archdata.dma_mask = 0xffffffffUL;
	dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
	dev->ofdev.dev.parent = parent;
	dev->ofdev.dev.bus = &macio_bus_type;
	dev->ofdev.dev.release = macio_release_dev;
	dev->ofdev.dev.dma_parms = &dev->dma_parms;

	/* Standard DMA parameters */
	dma_set_max_seg_size(&dev->ofdev.dev, 65536);
	dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);

#ifdef CONFIG_PCI
	/* Set the DMA ops to the ones from the PCI device, this could be
	 * fishy if we didn't know that on PowerMac it's always direct ops
	 * or iommu ops that will work fine
	 */
	dev->ofdev.dev.archdata.dma_ops =
		chip->lbus.pdev->dev.archdata.dma_ops;
	dev->ofdev.dev.archdata.dma_data =
		chip->lbus.pdev->dev.archdata.dma_data;
#endif /* CONFIG_PCI */

#ifdef DEBUG
	printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
	       dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
#endif

	/* MacIO itself has a different reg, we use its PCI base */
	if (np == chip->of_node) {
		dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
			     chip->lbus.index,
#ifdef CONFIG_PCI
			(unsigned int)pci_resource_start(chip->lbus.pdev, 0),
#else
			0, /* NuBus may want to do something better here */
#endif
			MAX_NODE_NAME_SIZE, np->name);
	} else {
		reg = of_get_property(np, "reg", NULL);
		dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
			     chip->lbus.index,
			     reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
	}

	/* Setup interrupts & resources */
	macio_setup_interrupts(dev);
	macio_setup_resources(dev, parent_res);
	macio_add_missing_resources(dev);

	/* Register with core */
	if (of_device_register(&dev->ofdev) != 0) {
		printk(KERN_DEBUG"macio: device registration error for %s!\n",
		       dev_name(&dev->ofdev.dev));
		/* NOTE(review): a failed registration likely requires
		 * put_device() rather than a direct kfree() if the device
		 * was initialized inside of_device_register() — confirm
		 * against the driver-core contract. */
		kfree(dev);
		return NULL;
	}

	return dev;
}
/*
 * omap_hsmmc_init_one() - build and register one HSMMC omap_device.
 *
 * Translates board data (hsmmcinfo) into platform data, looks up the
 * matching "mmcN" hwmod, allocates a platform device and omap_device,
 * copies the platform data in, and registers the device (unless deferred).
 * Errors are logged and the function returns void; partially-built state
 * is unwound through the goto chain at the bottom.
 */
static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
					int ctrl_nr)
{
	struct omap_hwmod *oh;
	struct omap_hwmod *ohs[1];
	struct omap_device *od;
	struct platform_device *pdev;
	char oh_name[MAX_OMAP_MMC_HWMOD_NAME_LEN];
	struct omap_hsmmc_platform_data *mmc_data;
	struct omap_hsmmc_dev_attr *mmc_dev_attr;
	char *name;
	int res;

	mmc_data = kzalloc(sizeof(*mmc_data), GFP_KERNEL);
	if (!mmc_data)
		return;

	res = omap_hsmmc_pdata_init(hsmmcinfo, mmc_data);
	if (res < 0)
		goto free_mmc;

	name = "omap_hsmmc";

	/* hwmod names are 1-based: "mmc1", "mmc2", ... */
	res = snprintf(oh_name, MAX_OMAP_MMC_HWMOD_NAME_LEN,
		"mmc%d", ctrl_nr);
	WARN(res >= MAX_OMAP_MMC_HWMOD_NAME_LEN,
		"String buffer overflow in MMC%d device setup\n", ctrl_nr);

	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		pr_err("Could not look up %s\n", oh_name);
		goto free_name;
	}
	ohs[0] = oh;
	if (oh->dev_attr != NULL) {
		mmc_dev_attr = oh->dev_attr;
		mmc_data->controller_flags = mmc_dev_attr->flags;
		/*
		 * erratum 2.1.1.128 doesn't apply if board has
		 * a transceiver attached
		 */
		if (hsmmcinfo->transceiver)
			mmc_data->controller_flags &=
				~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
	}

	/* platform device id is 0-based, hence ctrl_nr - 1 */
	pdev = platform_device_alloc(name, ctrl_nr - 1);
	if (!pdev) {
		pr_err("Could not allocate pdev for %s\n", name);
		goto free_name;
	}
	dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);

	od = omap_device_alloc(pdev, ohs, 1);
	if (IS_ERR(od)) {
		pr_err("Could not allocate od for %s\n", name);
		goto put_pdev;
	}

	/* NOTE(review): platform_device_add_data() presumably copies
	 * mmc_data into the pdev, which is why the success paths below
	 * still free mmc_data — confirm.  Also, on this failure path the
	 * omap_device 'od' does not appear to be deleted — possible leak;
	 * verify against omap_device ownership rules. */
	res = platform_device_add_data(pdev, mmc_data,
			      sizeof(struct omap_hsmmc_platform_data));
	if (res) {
		pr_err("Could not add pdata for %s\n", name);
		goto put_pdev;
	}

	hsmmcinfo->pdev = pdev;

	/* Deferred controllers are registered later by the caller. */
	if (hsmmcinfo->deferred)
		goto free_mmc;

	res = omap_device_register(pdev);
	if (res) {
		pr_err("Could not register od for %s\n", name);
		goto free_od;
	}

	goto free_mmc;

free_od:
	omap_device_delete(od);

put_pdev:
	platform_device_put(pdev);

free_name:
	kfree(mmc_data->name);

free_mmc:
	kfree(mmc_data);
}
/**
 * omap_hsmmc_reset() - Full reset of each HS-MMC controller
 *
 * Ensure that each MMC controller is fully reset. Controllers
 * left in an unknown state (by bootloader) may prevent retention
 * or OFF-mode. This is especially important in cases where the
 * MMC driver is not enabled, _or_ built as a module.
 *
 * In order for reset to work, interface, functional and debounce
 * clocks must be enabled. The debounce clock comes from func_32k_clk
 * and is not under SW control, so we only enable i- and f-clocks.
 **/
static void __init omap_hsmmc_reset(void)
{
	u32 i, nr_controllers = cpu_is_omap44xx() ? OMAP44XX_NR_MMC :
		(cpu_is_omap34xx() ? OMAP34XX_NR_MMC : OMAP24XX_NR_MMC);

	for (i = 0; i < nr_controllers; i++) {
		u32 base = 0;
		struct clk *iclk, *fclk;
		struct device *dev = &dummy_pdev.dev;

		switch (i) {
		case 0:
			base = OMAP2_MMC1_BASE;
			break;
		case 1:
			base = OMAP2_MMC2_BASE;
			break;
		case 2:
			base = OMAP3_MMC3_BASE;
			break;
		case 3:
			/* MMC4/5 exist only on OMAP4 */
			if (!cpu_is_omap44xx())
				return;
			base = OMAP4_MMC4_BASE;
			break;
		case 4:
			if (!cpu_is_omap44xx())
				return;
			base = OMAP4_MMC5_BASE;
			break;
		}

		if (cpu_is_omap44xx())
			base += OMAP4_MMC_REG_OFFSET;

		/* Name the dummy device so clk_get() resolves per-instance
		 * "ick"/"fck" clocks for controller i. */
		dummy_pdev.id = i;
		dev_set_name(&dummy_pdev.dev, "mmci-omap-hs.%d", i);
		iclk = clk_get(dev, "ick");
		if (iclk && clk_enable(iclk))
			iclk = NULL;

		fclk = clk_get(dev, "fck");
		if (fclk && clk_enable(fclk))
			fclk = NULL;

		if (!iclk || !fclk) {
			printk(KERN_WARNING
			       "%s: Unable to enable clocks for MMC%d, "
			       "cannot reset.\n",  __func__, i);
			break;
		}

		omap_writel(MMCHS_SYSCONFIG_SWRESET, base + MMCHS_SYSCONFIG);
		/* Busy-wait for RESETDONE.  Fix: dropped the dead pre-read
		 * of SYSSTATUS into a local ('v') that was never used. */
		while (!(omap_readl(base + MMCHS_SYSSTATUS) &
			 MMCHS_SYSSTATUS_RESETDONE))
			cpu_relax();

		if (fclk) {
			clk_disable(fclk);
			clk_put(fclk);
		}
		if (iclk) {
			clk_disable(iclk);
			clk_put(iclk);
		}
	}
}
static int zynq_rpmsg_initialize(struct platform_device *pdev) { int ret = 0; int index; struct virtio_device *virtio_dev; /* Register ipi handler. */ ret = set_ipi_handler(zynq_rpmsg_p->vring0, ipi_handler, "Firmware kick"); if (ret) { dev_err(&pdev->dev, "IPI handler already registered\n"); return -ENODEV; } /* Initialize work. */ INIT_WORK(&zynq_rpmsg_work, handle_event); /* Memory allocations for vrings. */ ret = dma_declare_coherent_memory(&pdev->dev, zynq_rpmsg_p->mem_start, zynq_rpmsg_p->mem_start, zynq_rpmsg_p->mem_end - zynq_rpmsg_p->mem_start + 1, DMA_MEMORY_IO); if (!ret) { dev_err(&pdev->dev, "dma_declare_coherent_memory failed\n"); return -ENODEV; } ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret); return -ENODEV; } /* Initialize a mid-level device. Needed because of bad data structure * handling and assumptions within the virtio rpmsg bus. We are doing it * to just make sure that the virtio device has a parent device which * then itself has a parent in the form of the platform device. */ device_initialize(&(zynq_rpmsg_p->mid_dev)); zynq_rpmsg_p->mid_dev.parent = &(pdev->dev); zynq_rpmsg_p->mid_dev.type = &mid_level_type; index = ida_simple_get(&rpmsg_zynq_dev_index, 0, 0, GFP_KERNEL); if (index < 0) { put_device(&(zynq_rpmsg_p->mid_dev)); return -ENODEV; } dev_set_name(&(zynq_rpmsg_p->mid_dev), "rpmsg_mid%d", index); device_add(&(zynq_rpmsg_p->mid_dev)); /* Setup the virtio device structure. */ virtio_dev = &(zynq_rpmsg_p->virtio_dev); virtio_dev->id.device = zynq_rpmsg_p->virtioid; virtio_dev->config = &zynq_rpmsg_virtio_config_ops; virtio_dev->dev.parent = &(zynq_rpmsg_p->mid_dev); virtio_dev->dev.release = zynq_rpmsg_vdev_release; /* Register the virtio device. */ ret = register_virtio_device(virtio_dev); dev_info(&(zynq_rpmsg_platform->dev), "virtio device registered \r\n"); return ret; }
int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv; struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); int pipe_size = usbhs_get_dparam(priv, pipe_size); int i; int ret; gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL); if (!gpriv) { dev_err(dev, "Could not allocate gadget priv\n"); return -ENOMEM; } uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL); if (!uep) { dev_err(dev, "Could not allocate ep\n"); ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } /* * CAUTION * * There is no guarantee that it is possible to access usb module here. * Don't accesses to it. * The accesse will be enable after "usbhsg_start" */ /* * register itself */ usbhs_mod_register(priv, &gpriv->mod, USBHS_GADGET); /* init gpriv */ gpriv->mod.name = "gadget"; gpriv->mod.start = usbhsg_start; gpriv->mod.stop = usbhsg_stop; gpriv->uep = uep; gpriv->uep_size = pipe_size; usbhsg_status_init(gpriv); /* * init gadget */ device_initialize(&gpriv->gadget.dev); dev_set_name(&gpriv->gadget.dev, "gadget"); gpriv->gadget.dev.parent = dev; gpriv->gadget.name = "renesas_usbhs_udc"; gpriv->gadget.ops = &usbhsg_gadget_ops; gpriv->gadget.is_dualspeed = 1; INIT_LIST_HEAD(&gpriv->gadget.ep_list); /* * init usb_ep */ usbhsg_for_each_uep_with_dcp(uep, gpriv, i) { uep->gpriv = gpriv; snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i); uep->ep.name = uep->ep_name; uep->ep.ops = &usbhsg_ep_ops; INIT_LIST_HEAD(&uep->ep.ep_list); INIT_LIST_HEAD(&uep->list); /* init DCP */ if (usbhsg_is_dcp(uep)) { gpriv->gadget.ep0 = &uep->ep; uep->ep.maxpacket = 64; } /* init normal pipe */ else { uep->ep.maxpacket = 512; list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list); } }
static int vlynq_probe(struct platform_device *pdev) { struct vlynq_device *dev; struct resource *regs_res, *mem_res, *irq_res; int len, result; regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!regs_res) return -ENODEV; mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); if (!mem_res) return -ENODEV; irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq"); if (!irq_res) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { printk(KERN_ERR "vlynq: failed to allocate device structure\n"); return -ENOMEM; } dev->id = pdev->id; dev->dev.bus = &vlynq_bus_type; dev->dev.parent = &pdev->dev; dev_set_name(&dev->dev, "vlynq%d", dev->id); dev->dev.platform_data = pdev->dev.platform_data; dev->dev.release = vlynq_device_release; dev->regs_start = regs_res->start; dev->regs_end = regs_res->end; dev->mem_start = mem_res->start; dev->mem_end = mem_res->end; len = resource_size(regs_res); if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { printk(KERN_ERR "%s: Can't request vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_request; } dev->local = ioremap(regs_res->start, len); if (!dev->local) { printk(KERN_ERR "%s: Can't remap vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_remap; } dev->remote = (struct vlynq_regs *)((void *)dev->local + VLYNQ_REMOTE_OFFSET); dev->irq = platform_get_irq_byname(pdev, "irq"); dev->irq_start = irq_res->start; dev->irq_end = irq_res->end; dev->local_irq = dev->irq_end - dev->irq_start; dev->remote_irq = dev->local_irq - 1; if (device_register(&dev->dev)) goto fail_register; platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n", dev_name(&dev->dev), (void *)dev->regs_start, dev->irq, (void *)dev->mem_start); dev->dev_id = 0; dev->divisor = vlynq_div_auto; result = __vlynq_enable_device(dev); if (result == 0) { dev->dev_id = readl(&dev->remote->chip); ((struct plat_vlynq_ops 
*)(dev->dev.platform_data))->off(dev); } if (dev->dev_id) printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id); return 0; fail_register: iounmap(dev->local); fail_remap: fail_request: release_mem_region(regs_res->start, len); kfree(dev); return result; }
static int hi3630_srcup_normal_probe(struct platform_device *pdev) { int ret = -1; struct device *dev = &pdev->dev; struct hi3630_srcup_data *pdata = NULL; if (!dev) { loge("platform_device has no device\n"); return -ENOENT; } pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(dev, "cannot allocate hi3630 srcup platform data\n"); return -ENOMEM; } pdata->dev = dev; #ifdef SRCUP_ENABLE pdata->hi3630_asp_irq = dev_get_drvdata(pdev->dev.parent); if (!pdata->hi3630_asp_irq) { dev_err(dev, "get parent device error\n"); return -ENOENT; } pdata->irq = platform_get_irq_byname(pdev, "asp_srcup_normal"); if (0 > pdata->irq) { dev_err(dev, "cannot get irq\n"); return -ENOENT; } pdata->regu.supply = "asp-srcup-normal"; ret = devm_regulator_bulk_get(dev, 1, &(pdata->regu)); if (0 != ret) { dev_err(dev, "couldn't get regulators %d\n", ret); return -ENOENT; } pdata->hwlock = hwspin_lock_request_specific(HWLOCK_ID); if(NULL == pdata->hwlock) { dev_err(dev, "couldn't request hwlock:%d\n", HWLOCK_ID); return -ENOENT; } #ifdef CONFIG_PM_RUNTIME pm_runtime_set_autosuspend_delay(dev, 100); /* 100ms*/ pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); #endif #endif platform_set_drvdata(pdev, pdata); dev_set_name(dev, "hi3630-srcup-normal"); ret = snd_soc_register_platform(dev, &hi3630_srcup_normal_platform); if (ret) { loge("snd_soc_register_platform return %d\n", ret); return -ENODEV; } return ret; }
/* * Register a new MMC card with the driver model. */ int mmc_add_card(struct mmc_card *card) { int ret; const char *type; const char *uhs_bus_speed_mode = ""; dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); switch (card->type) { case MMC_TYPE_MMC: type = "MMC"; break; case MMC_TYPE_SD: type = "SD"; if (mmc_card_blockaddr(card)) { if (mmc_card_ext_capacity(card)) type = "SDXC"; else type = "SDHC"; } break; case MMC_TYPE_SDIO: type = "SDIO"; break; case MMC_TYPE_SD_COMBO: type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; break; default: type = "?"; break; } if (mmc_sd_card_uhs(card)) { switch (card->sd_bus_speed) { case UHS_SDR104_BUS_SPEED: uhs_bus_speed_mode = "SDR104 "; break; case UHS_SDR50_BUS_SPEED: uhs_bus_speed_mode = "SDR50 "; break; case UHS_DDR50_BUS_SPEED: uhs_bus_speed_mode = "DDR50 "; break; case UHS_SDR25_BUS_SPEED: uhs_bus_speed_mode = "SDR25 "; break; case UHS_SDR12_BUS_SPEED: uhs_bus_speed_mode = "SDR12 "; break; default: uhs_bus_speed_mode = ""; break; } } if (mmc_host_is_spi(card->host)) { printk(KERN_INFO "%s: new %s%s%s card on SPI\n", mmc_hostname(card->host), mmc_card_highspeed(card) ? "high speed " : "", mmc_card_ddr_mode(card) ? "DDR " : "", type); } else { pr_info("%s: new %s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_sd_card_uhs(card) ? "ultra high speed " : (mmc_card_highspeed(card) ? "high speed " : ""), mmc_card_ddr_mode(card) ? "DDR " : "", uhs_bus_speed_mode, type, card->rca); } #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif ret = device_add(&card->dev); if (ret) return ret; mmc_card_set_present(card); return 0; }
/*
 * of_amba_device_create() - create an AMBA (PrimeCell) device from a DT node.
 *
 * Marks the node OF_POPULATED (skipping disabled or already-populated
 * nodes), allocates and names the device, configures DMA, honors an
 * "arm,primecell-periphid" override, maps IRQs and the first address
 * range, and adds the device to the AMBA bus.  Returns the device, or
 * NULL on failure (with OF_POPULATED cleared again so a later pass can
 * retry).
 */
static struct amba_device *of_amba_device_create(struct device_node *node,
						 const char *bus_id,
						 void *platform_data,
						 struct device *parent)
{
	struct amba_device *dev;
	const void *prop;
	int i, ret;

	pr_debug("Creating amba device %s\n", node->full_name);

	/* Atomically claim the node; losing the test-and-set means another
	 * pass already created a device for it. */
	if (!of_device_is_available(node) ||
	    of_node_test_and_set_flag(node, OF_POPULATED))
		return NULL;

	dev = amba_device_alloc(NULL, 0, 0);
	if (!dev)
		goto err_clear_flag;

	/* setup generic device info */
	dev->dev.of_node = of_node_get(node);
	dev->dev.fwnode = &node->fwnode;
	/* fall back to the platform bus when no parent was supplied */
	dev->dev.parent = parent ? : &platform_bus;
	dev->dev.platform_data = platform_data;
	if (bus_id)
		dev_set_name(&dev->dev, "%s", bus_id);
	else
		of_device_make_bus_id(&dev->dev);
	of_dma_configure(&dev->dev, dev->dev.of_node);

	/* Allow the HW Peripheral ID to be overridden */
	prop = of_get_property(node, "arm,primecell-periphid", NULL);
	if (prop)
		dev->periphid = of_read_ulong(prop, 1);

	/* Decode the IRQs and address ranges */
	for (i = 0; i < AMBA_NR_IRQS; i++)
		dev->irq[i] = irq_of_parse_and_map(node, i);

	ret = of_address_to_resource(node, 0, &dev->res);
	if (ret) {
		pr_err("amba: of_address_to_resource() failed (%d) for %s\n",
		       ret, node->full_name);
		goto err_free;
	}

	ret = amba_device_add(dev, &iomem_resource);
	if (ret) {
		pr_err("amba_device_add() failed (%d) for %s\n",
		       ret, node->full_name);
		goto err_free;
	}

	return dev;

err_free:
	amba_device_put(dev);
err_clear_flag:
	of_node_clear_flag(node, OF_POPULATED);
	return NULL;
}
/**
 * ccwgroup_create_from_string() - create and register a ccw group device
 * @root: parent device for the new device
 * @creator_id: identifier of creating driver
 * @cdrv: ccw driver of slave devices
 * @num_devices: number of slave devices
 * @buf: buffer containing comma separated bus ids of slave devices
 *
 * Create and register a new ccw group device as a child of @root. Slave
 * devices are obtained from the list of bus ids given in @buf and must all
 * belong to @cdrv.
 * Returns:
 *  %0 on success and an error code on failure.
 * Context:
 *  non-atomic
 */
int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
				struct ccw_driver *cdrv, int num_devices,
				const char *buf)
{
	struct ccwgroup_device *gdev;
	int rc, i;
	char tmp_bus_id[CCW_BUS_ID_SIZE];
	const char *curr_buf;

	/* gdev carries a flexible array of num_devices cdev pointers. */
	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
		       GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	atomic_set(&gdev->onoff, 0);
	mutex_init(&gdev->reg_mutex);
	/* Held across registration to serialize against ungrouping. */
	mutex_lock(&gdev->reg_mutex);
	gdev->creator_id = creator_id;
	gdev->count = num_devices;
	gdev->dev.bus = &ccwgroup_bus_type;
	gdev->dev.parent = root;
	gdev->dev.release = ccwgroup_release;
	device_initialize(&gdev->dev);

	/* Parse each bus id out of buf, look up its ccw device, and claim
	 * it for this group via drvdata. */
	curr_buf = buf;
	for (i = 0; i < num_devices && curr_buf; i++) {
		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
		if (rc != 0)
			goto error;
		if (!__is_valid_bus_id(tmp_bus_id)) {
			rc = -EINVAL;
			goto error;
		}
		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
		/*
		 * All devices have to be of the same type in
		 * order to be grouped.
		 */
		if (!gdev->cdev[i] ||
		    gdev->cdev[i]->id.driver_info !=
		    gdev->cdev[0]->id.driver_info) {
			rc = -EINVAL;
			goto error;
		}
		/* Don't allow a device to belong to more than one group. */
		spin_lock_irq(gdev->cdev[i]->ccwlock);
		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			rc = -EINVAL;
			goto error;
		}
		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
		spin_unlock_irq(gdev->cdev[i]->ccwlock);
	}
	/* Check for sufficient number of bus ids. */
	if (i < num_devices && !curr_buf) {
		rc = -EINVAL;
		goto error;
	}
	/* Check for trailing stuff.
	 * NOTE(review): if __get_next_bus_id() can set curr_buf to NULL
	 * after consuming the last id, strlen(curr_buf) here would
	 * dereference NULL — confirm against its implementation. */
	if (i == num_devices && strlen(curr_buf) > 0) {
		rc = -EINVAL;
		goto error;
	}

	/* The group device is named after its first slave device. */
	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));

	rc = device_add(&gdev->dev);
	if (rc)
		goto error;
	get_device(&gdev->dev);
	rc = device_create_file(&gdev->dev, &dev_attr_ungroup);

	if (rc) {
		device_unregister(&gdev->dev);
		goto error;
	}

	rc = __ccwgroup_create_symlinks(gdev);
	if (!rc) {
		mutex_unlock(&gdev->reg_mutex);
		put_device(&gdev->dev);
		return 0;
	}
	device_remove_file(&gdev->dev, &dev_attr_ungroup);
	device_unregister(&gdev->dev);
error:
	/* Release every claimed slave: clear its drvdata (only if it still
	 * points at this group) and drop the reference taken above. */
	for (i = 0; i < num_devices; i++)
		if (gdev->cdev[i]) {
			spin_lock_irq(gdev->cdev[i]->ccwlock);
			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			put_device(&gdev->cdev[i]->dev);
			gdev->cdev[i] = NULL;
		}
	mutex_unlock(&gdev->reg_mutex);
	put_device(&gdev->dev);
	return rc;
}
/**
 * chp_new - register a new channel-path
 * @chpid - channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_path *chp;
	int ret;

	/* Idempotent: an already-registered chpid counts as success. */
	if (chp_is_registered(chpid))
		return 0;
	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
	chp->dev.release = chp_release;
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	if (ret)
		goto out_free;
	/* Bit 0x80 in the description flags marks a valid/usable path. */
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Get channel-measurement characteristics. */
	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		/* cmg == -1 marks "no measurement data available". */
		chp->cmg = -1;
	}
	/* make it known to the system
	 * NOTE(review): if device_register() fails after initializing the
	 * device, the driver-core contract suggests put_device() rather
	 * than the direct kfree() reached via out_free — confirm. */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out;
	}
	/* The subsystem mutex serializes against concurrent channel
	 * measurement enable/disable. */
	mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
	if (channel_subsystems[chpid.cssid]->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
			goto out;
		}
	}
	channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
	mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
	goto out;
out_free:
	kfree(chp);
out:
	return ret;
}
/*
 * Probe the MSM 72k USB device controller: map its register window, set
 * up DMA buffers and pools, look up the required clocks, quiesce the
 * block, and install its interrupt handler.  All failure paths funnel
 * through usb_free(), which releases whatever was acquired so far and
 * returns the given error code.
 */
static int msm72k_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct usb_info *ui;
	int irq;
	int ret;

	INFO("msm72k_probe\n");

	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
	if (!ui)
		return -ENOMEM;

	spin_lock_init(&ui->lock);
	ui->pdev = pdev;

	/* Board-specific callbacks/sequences come in via platform data. */
	if (pdev->dev.platform_data) {
		struct msm_hsusb_platform_data *pdata = pdev->dev.platform_data;
		ui->phy_reset = pdata->phy_reset;
		ui->phy_init_seq = pdata->phy_init_seq;
		ui->usb_connected = pdata->usb_connected;
	}

	irq = platform_get_irq(pdev, 0);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res || (irq < 0))
		return usb_free(ui, -ENODEV);

	ui->addr = ioremap(res->start, 4096);
	if (!ui->addr)
		return usb_free(ui, -ENOMEM);

	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
	if (!ui->buf)
		return usb_free(ui, -ENOMEM);

	ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0);
	if (!ui->pool)
		return usb_free(ui, -ENOMEM);

	INFO("msm72k_probe() io=%p, irq=%d, dma=%p(%x)\n",
	     ui->addr, irq, ui->buf, ui->dma);

	ui->clk = clk_get(&pdev->dev, "usb_hs_clk");
	if (IS_ERR(ui->clk))
		return usb_free(ui, PTR_ERR(ui->clk));

	ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
	if (IS_ERR(ui->pclk))
		return usb_free(ui, PTR_ERR(ui->pclk));

	/* These two clocks are optional: a failed lookup is tolerated. */
	ui->otgclk = clk_get(&pdev->dev, "usb_otg_clk");
	if (IS_ERR(ui->otgclk))
		ui->otgclk = NULL;

	ui->coreclk = clk_get(&pdev->dev, "usb_hs_core_clk");
	if (IS_ERR(ui->coreclk))
		ui->coreclk = NULL;

	ui->ebi1clk = clk_get(NULL, "ebi1_clk");
	if (IS_ERR(ui->ebi1clk))
		return usb_free(ui, PTR_ERR(ui->ebi1clk));

	/* clear interrupts before requesting irq */
	if (ui->coreclk)
		clk_enable(ui->coreclk);
	clk_enable(ui->clk);
	clk_enable(ui->pclk);
	if (ui->otgclk)
		clk_enable(ui->otgclk);
	writel(0, USB_USBINTR);
	writel(0, USB_OTGSC);
	if (ui->coreclk)
		clk_disable(ui->coreclk);
	if (ui->otgclk)
		clk_disable(ui->otgclk);
	clk_disable(ui->pclk);
	clk_disable(ui->clk);

	ret = request_irq(irq, usb_interrupt, 0, pdev->name, ui);
	if (ret)
		return usb_free(ui, ret);
	/* Allow USB activity to wake the system. */
	enable_irq_wake(irq);
	ui->irq = irq;

	ui->gadget.ops = &msm72k_ops;
	ui->gadget.is_dualspeed = 1;
	device_initialize(&ui->gadget.dev);
	dev_set_name(&ui->gadget.dev, "gadget");
	ui->gadget.dev.parent = &pdev->dev;
	ui->gadget.dev.dma_mask = pdev->dev.dma_mask;

	the_usb_info = ui;

	usb_debugfs_init(ui);
	usb_prepare(ui);
	return 0;
}
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	/* Hold the adapter for as long as the port exists. */
	kref_get(&adapter->ref);

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		/* Drop the lookup reference; the port already exists. */
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);
	atomic_set(&port->units, 0);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.release = zfcp_port_release;

	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		/* Device not registered yet, so a plain kfree is safe. */
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->dev)) {
		/* device_register() failed: release via put_device(). */
		put_device(&port->dev);
		goto err_out;
	}

	if (sysfs_create_group(&port->dev.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->dev);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;
	int retval = -ENOMEM;

	/* Hold the port for as long as the unit exists. */
	get_device(&port->dev);

	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit) {
		/* Drop the lookup reference; the unit already exists. */
		put_device(&unit->dev);
		retval = -EEXIST;
		goto err_out;
	}

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		goto err_out;

	unit->port = port;
	unit->fcp_lun = fcp_lun;
	unit->dev.parent = &port->dev;
	unit->dev.release = zfcp_unit_release;

	if (dev_set_name(&unit->dev, "0x%016llx",
			 (unsigned long long) fcp_lun)) {
		/* Device not registered yet, so a plain kfree is safe. */
		kfree(unit);
		goto err_out;
	}
	retval = -EINVAL;

	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);

	spin_lock_init(&unit->latencies.lock);
	/* Minima start at the maximum value so any sample lowers them. */
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	if (device_register(&unit->dev)) {
		/* device_register() failed: release via put_device(). */
		put_device(&unit->dev);
		goto err_out;
	}

	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
		goto err_out_put;

	write_lock_irq(&port->unit_list_lock);
	list_add_tail(&unit->list, &port->unit_list);
	write_unlock_irq(&port->unit_list_lock);

	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);

	return unit;

err_out_put:
	device_unregister(&unit->dev);
err_out:
	put_device(&port->dev);
	return ERR_PTR(retval);
}
/*
 * Module entry point: initialize the hardware-specific layer (SN2 or UV),
 * set up partition bookkeeping and the reserved page, register notifier
 * hooks, and start the heartbeat-check and discovery kernel threads.
 */
int __init xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/* SN2 support here assumes exactly 64 partitions. */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}
	} else if (is_uv()) {
		ret = xpc_init_uv();
	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/* Set up our reserved page (see xpc_setup_rsvd_page()). */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* Notifier registration failures are tolerated: warn only. */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/* Start the heartbeat-checker thread. */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/* Start the discovery thread. */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/*
		 * The heartbeat checker is already running, so tear down
		 * through the regular exit path; mark discovery as done
		 * first since it never started.
		 */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* Publish XPC's entry points for its users. */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}
/*
 * Register the shared parent device for sysfs triggers.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init iio_sysfs_trig_init(void)
{
	int ret;

	device_initialize(&iio_sysfs_trig_dev);
	dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
	ret = device_add(&iio_sysfs_trig_dev);
	if (ret)
		/*
		 * device_initialize() took a reference on the embedded
		 * kobject; drop it when device_add() fails instead of
		 * leaking it.
		 */
		put_device(&iio_sysfs_trig_dev);
	return ret;
}
/**
 * usb_alloc_dev - usb device constructor (usbcore-internal)
 * @parent: hub to which device is connected; null to allocate a root hub
 * @bus: bus used to access the device
 * @port1: one-based index of port; ignored for root hubs
 * Context: !in_interrupt()
 *
 * Only hub drivers (including virtual root hub drivers for host
 * controllers) should ever call this.
 *
 * This call may not be used in a non-sleeping context.
 *
 * Returns the initialized (but not yet registered) device, or NULL on
 * allocation failure.
 */
struct usb_device *usb_alloc_dev(struct usb_device *parent,
				 struct usb_bus *bus, unsigned port1)
{
	struct usb_device *dev;
	struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
	unsigned root_hub = 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Pin the host controller for the lifetime of this device. */
	if (!usb_get_hcd(bus_to_hcd(bus))) {
		kfree(dev);
		return NULL;
	}

	device_initialize(&dev->dev);
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;
	dev->dev.groups = usb_device_groups;
	dev->dev.dma_mask = bus->controller->dma_mask;
	set_dev_node(&dev->dev, dev_to_node(bus->controller));
	dev->state = USB_STATE_ATTACHED;
	atomic_set(&dev->urbnum, 0);

	INIT_LIST_HEAD(&dev->ep0.urb_list);
	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
	/* ep0 maxpacket comes later, from device descriptor */
	usb_enable_endpoint(dev, &dev->ep0, true);
	dev->can_submit = 1;

	/* Save readable and stable topology id, distinguishing devices
	 * by location for diagnostics, tools, driver model, etc.  The
	 * string is a path along hub ports, from the root.  Each device's
	 * dev->devpath will be stable until USB is re-cabled, and hubs
	 * are often labeled with these port numbers.  The name isn't
	 * as stable:  bus->busnum changes easily from modprobe order,
	 * cardbus or pci hotplugging, and so on.
	 */
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';

		dev->dev.parent = bus->controller;
		dev_set_name(&dev->dev, "usb%d", bus->busnum);
		root_hub = 1;
	} else {
		/* match any labeling on the hubs; it's one-based */
		if (parent->devpath[0] == '0')
			snprintf(dev->devpath, sizeof dev->devpath,
				 "%d", port1);
		else
			snprintf(dev->devpath, sizeof dev->devpath,
				 "%s.%d", parent->devpath, port1);

		dev->dev.parent = &parent->dev;
		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);

		/* hub driver sets up TT records */
	}

	dev->portnum = port1;
	dev->bus = bus;
	dev->parent = parent;
	INIT_LIST_HEAD(&dev->filelist);

#ifdef CONFIG_PM
	mutex_init(&dev->pm_mutex);
	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
	dev->autosuspend_delay = usb_autosuspend_delay * HZ;
	dev->connect_time = jiffies;
	dev->active_duration = -jiffies;
#endif
	if (root_hub)	/* Root hub always ok [and always wired] */
		dev->authorized = 1;
	else {
		dev->authorized = usb_hcd->authorized_default;
		dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
	}
	return dev;
}
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 * Locks: config_mutex must be held to serialize changes to the unit list
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	/* Refuse duplicates; the lookup needs the config read lock. */
	read_lock_irq(&zfcp_data.config_lock);
	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
		read_unlock_irq(&zfcp_data.config_lock);
		return ERR_PTR(-EINVAL);
	}
	read_unlock_irq(&zfcp_data.config_lock);

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	atomic_set(&unit->refcount, 0);
	init_waitqueue_head(&unit->remove_wq);
	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);

	unit->port = port;
	unit->fcp_lun = fcp_lun;

	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
			 (unsigned long long) fcp_lun)) {
		/* Device not registered yet, so a plain kfree is safe. */
		kfree(unit);
		return ERR_PTR(-ENOMEM);
	}
	unit->sysfs_device.parent = &port->sysfs_device;
	unit->sysfs_device.release = zfcp_sysfs_unit_release;
	dev_set_drvdata(&unit->sysfs_device, unit);

	/* mark unit unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);

	spin_lock_init(&unit->latencies.lock);
	/* Minima start at the maximum value so any sample lowers them. */
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	if (device_register(&unit->sysfs_device)) {
		/* device_register() failed: release via put_device(). */
		put_device(&unit->sysfs_device);
		return ERR_PTR(-EINVAL);
	}

	if (sysfs_create_group(&unit->sysfs_device.kobj,
			       &zfcp_sysfs_unit_attrs)) {
		device_unregister(&unit->sysfs_device);
		return ERR_PTR(-EINVAL);
	}

	zfcp_unit_get(unit);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&unit->list, &port->unit_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_port_get(port);

	return unit;
}
/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i, err;

	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!mci->bus->name)
		return -ENOMEM;
	edac_dbg(0, "creating bus %s\n", mci->bus->name);
	err = bus_register(mci->bus);
	if (err < 0)
		return err;

	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.bus = mci->bus;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		bus_unregister(mci->bus);
		kfree(mci->bus->name);
		return err;
	}

	/*
	 * Expose the scrub-rate attribute only when the controller
	 * provides the callbacks; mode bits mirror which ones exist.
	 */
	if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
		if (mci->get_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
			dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
		}
		if (mci->set_sdram_scrub_rate) {
			dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
			dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
		}
		err = device_create_file(&mci->dev,
					 &dev_attr_sdram_scrub_rate);
		if (err) {
			edac_dbg(1, "failure: create sdram_scrub_rate\n");
			goto fail2;
		}
	}
	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (dimm->nr_pages == 0)
			continue;
#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail;
#endif

#ifdef CONFIG_EDAC_DEBUG
	edac_create_debug_nodes(mci);
#endif
	return 0;

fail:
	/* Unwind the dimm devices registered so far. */
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		device_unregister(&dimm->dev);
	}
fail2:
	device_unregister(&mci->dev);
	bus_unregister(mci->bus);
	kfree(mci->bus->name);
	return err;
}
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 * Locks: config_mutex must be held to serialize changes to the port list
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;

	/* Refuse duplicates; the lookup needs the config read lock. */
	read_lock_irq(&zfcp_data.config_lock);
	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
		read_unlock_irq(&zfcp_data.config_lock);
		return ERR_PTR(-EINVAL);
	}
	read_unlock_irq(&zfcp_data.config_lock);

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&port->remove_wq);
	INIT_LIST_HEAD(&port->unit_list_head);
	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;

	/* mark port unusable as long as sysfs registration is not complete */
	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set(&port->refcount, 0);

	if (dev_set_name(&port->sysfs_device, "0x%016llx",
			 (unsigned long long)wwpn)) {
		/* Device not registered yet, so a plain kfree is safe. */
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}
	port->sysfs_device.parent = &adapter->ccw_device->dev;
	port->sysfs_device.release = zfcp_sysfs_port_release;
	dev_set_drvdata(&port->sysfs_device, port);

	if (device_register(&port->sysfs_device)) {
		/* device_register() failed: release via put_device(). */
		put_device(&port->sysfs_device);
		return ERR_PTR(-EINVAL);
	}

	if (sysfs_create_group(&port->sysfs_device.kobj,
			       &zfcp_sysfs_port_attrs)) {
		device_unregister(&port->sysfs_device);
		return ERR_PTR(-EINVAL);
	}

	zfcp_port_get(port);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&port->list, &adapter->port_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_adapter_get(adapter);

	return port;
}
/**
 * amba_device_register - register an AMBA device
 * @dev: AMBA device to register
 * @parent: parent memory resource
 *
 * Setup the AMBA device, reading the cell ID if present.
 * Claim the resource, and register the AMBA device with
 * the Linux device manager.
 */
int amba_device_register(struct amba_device *dev, struct resource *parent)
{
	u32 size;
	void __iomem *tmp;
	int i, ret;

	device_initialize(&dev->dev);

	/*
	 * Copy from device_add
	 */
	if (dev->dev.init_name) {
		dev_set_name(&dev->dev, "%s", dev->dev.init_name);
		dev->dev.init_name = NULL;
	}

	dev->dev.release = amba_device_release;
	dev->dev.bus = &amba_bustype;
	dev->dev.dma_mask = &dev->dma_mask;
	dev->res.name = dev_name(&dev->dev);

	if (!dev->dev.coherent_dma_mask && dev->dma_mask)
		dev_warn(&dev->dev, "coherent dma mask is unset\n");

	ret = request_resource(parent, &dev->res);
	if (ret)
		goto err_out;

	/*
	 * Dynamically calculate the size of the resource
	 * and use this for iomap
	 */
	size = resource_size(&dev->res);
	tmp = ioremap(dev->res.start, size);
	if (!tmp) {
		ret = -ENOMEM;
		goto err_release;
	}

	ret = amba_get_enable_pclk(dev);
	if (ret == 0) {
		u32 pid, cid;

		/*
		 * Read pid and cid based on size of resource
		 * they are located at end of region
		 */
		for (pid = 0, i = 0; i < 4; i++)
			pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) <<
				(i * 8);
		for (cid = 0, i = 0; i < 4; i++)
			cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
				(i * 8);

		amba_put_disable_pclk(dev);

		/* Only trust the peripheral ID when the cell ID matches. */
		if (cid == AMBA_CID)
			dev->periphid = pid;

		if (!dev->periphid)
			ret = -ENODEV;
	}

	iounmap(tmp);

	if (ret)
		goto err_release;

	ret = device_add(&dev->dev);
	if (ret)
		goto err_release;

	/* irq sysfs attributes are only created for wired interrupts. */
	if (dev->irq[0] != NO_IRQ)
		ret = device_create_file(&dev->dev, &dev_attr_irq0);
	if (ret == 0 && dev->irq[1] != NO_IRQ)
		ret = device_create_file(&dev->dev, &dev_attr_irq1);
	if (ret == 0)
		return ret;

	device_unregister(&dev->dev);

 err_release:
	release_resource(&dev->res);
 err_out:
	return ret;
}
/* * Register a new MMC card with the driver model. */ int mmc_add_card(struct mmc_card *card) { int ret; const char *type; dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); switch (card->type) { case MMC_TYPE_MMC: type = "MMC"; break; case MMC_TYPE_SD: type = "SD"; if (mmc_card_blockaddr(card)) { if (mmc_card_ext_capacity(card)) type = "SDXC"; else type = "SDHC"; } break; case MMC_TYPE_SDIO: type = "SDIO"; break; case MMC_TYPE_SD_COMBO: type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; default: type = "?"; break; } if (mmc_host_is_spi(card->host)) { printk(KERN_INFO "%s: new %s%s%s card on SPI\n", mmc_hostname(card->host), mmc_card_highspeed(card) ? "high speed " : "", mmc_card_ddr_mode(card) ? "DDR " : "", type); } else { printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_sd_card_uhs(card) ? "ultra high speed " : (mmc_card_highspeed(card) ? "high speed " : ""), mmc_card_ddr_mode(card) ? "DDR " : "", type, card->rca); } #ifdef CONFIG_HUAWEI_KERNEL if(MMC_TYPE_SD == card->type) { hw_extern_sdcard_insert(); } #endif #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif ret = device_add(&card->dev); if (ret) return ret; mmc_card_set_present(card); return 0; }
/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
				 const struct attribute_group **groups)
{
	char *name;
	int i, err;

	/*
	 * The memory controller needs its own bus, in order to avoid
	 * namespace conflicts at /sys/bus/edac.
	 */
	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
	if (!name)
		return -ENOMEM;

	mci->bus->name = name;

	edac_dbg(0, "creating bus %s\n", mci->bus->name);

	err = bus_register(mci->bus);
	if (err < 0) {
		kfree(name);
		return err;
	}

	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.bus = mci->bus;
	mci->dev.groups = groups;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		goto fail_unregister_bus;
	}

	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (!dimm->nr_pages)
			continue;

#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;
			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail_unregister_dimm;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail_unregister_dimm;
#endif

	edac_create_debugfs_nodes(mci);
	return 0;

fail_unregister_dimm:
	/* Unwind the dimm devices registered so far. */
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (!dimm->nr_pages)
			continue;
		device_unregister(&dimm->dev);
	}
	device_unregister(&mci->dev);
fail_unregister_bus:
	bus_unregister(mci->bus);
	kfree(name);
	return err;
}
/**
 * intel_qrk_capsule_update_init
 *
 * @return 0 success < 0 failure
 *
 * Module entry point.  Creates the /sys/firmware/efi_capsule kobject,
 * registers a backing device and the sysfs control files.
 *
 * Error handling is corrected: previously the cleanup labels fell
 * through from put_device() into kfree(), freeing the device a second
 * time after the release callback ran, and a dev_set_name() failure
 * called put_device() on a device that was never initialized.
 */
static int __init efi_capsule_update_init(void)
{
	int retval = 0;
	extern struct kobject * firmware_kobj;

	INIT_LIST_HEAD(&sg_list);

	/* efi_capsule_kobj subordinate of firmware @ /sys/firmware/efi */
	efi_capsule_kobj = kobject_create_and_add("efi_capsule", firmware_kobj);
	if (!efi_capsule_kobj) {
		pr_err(PFX"kset create error\n");
		retval = -ENODEV;
		goto err;
	}

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto err;
	}

	retval = dev_set_name(dev, "%s", DRIVER_NAME);
	if (retval < 0) {
		pr_err(PFX"dev_set_name err\n");
		/*
		 * The device was never initialized/registered, so its
		 * kobject must not be touched via put_device(); free it
		 * directly.
		 */
		goto err_free;
	}

	dev->kobj.parent = efi_capsule_kobj;
	dev->groups = NULL;
	dev->release = efi_capsule_device_release;

	retval = device_register(dev);
	if (retval < 0) {
		pr_err(PFX"device_register error\n");
		/*
		 * device_register() initialized the kobject: drop the
		 * reference with put_device(), which invokes the release
		 * callback.  Do NOT kfree() afterwards.
		 */
		goto err_put;
	}

	if (sysfs_create_file(efi_capsule_kobj, &efi_capsule_path_attr.attr)) {
		pr_err(PFX SYSFS_ERRTXT);
		retval = -ENODEV;
		goto err_unregister;
	}

	if (sysfs_create_file(efi_capsule_kobj, &efi_capsule_update_attr.attr)) {
		pr_err(PFX SYSFS_ERRTXT);
		retval = -ENODEV;
		goto err_unregister;
	}

	if (sysfs_create_file(efi_capsule_kobj, &efi_capsule_csh_jump_attr.attr)) {
		pr_err(PFX SYSFS_ERRTXT);
		retval = -ENODEV;
		goto err_unregister;
	}

	return 0;

err_unregister:
	/* Drops the last reference; the release callback frees dev. */
	device_unregister(dev);
	goto err;
err_put:
	put_device(dev);
	goto err;
err_free:
	kfree(dev);
err:
	return retval;
}
/*
 * Board-level media initialization for the SMDK5410: registers platform
 * data, clock aliases and board info for MFC, MIPI-CSIS, FIMC-LITE, TV,
 * G-Scaler, FIMC-IS, FIMG2D and JPEG blocks, each gated by its Kconfig
 * option.
 */
void __init exynos5_smdk5410_media_init(void)
{
#if defined (CONFIG_CSI_D) || defined (CONFIG_S5K6B2_CSI_D)
	exynos5_hs_i2c1_set_platdata(NULL);
#endif
#if defined (CONFIG_CSI_E) || defined (CONFIG_S5K6B2_CSI_E)
	exynos5_hs_i2c2_set_platdata(NULL);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_MFC
	s5p_mfc_set_platdata(&smdk5410_mfc_pd);

	/* Alias the MFC clock under the legacy "s3c-mfc" device name. */
	dev_set_name(&s5p_device_mfc.dev, "s3c-mfc");
	clk_add_alias("mfc", "s5p-mfc-v6", "mfc", &s5p_device_mfc.dev);
	s5p_mfc_setname(&s5p_device_mfc, "s5p-mfc-v6");
#endif
	platform_add_devices(smdk5410_media_devices,
			ARRAY_SIZE(smdk5410_media_devices));

#ifdef CONFIG_VIDEO_S5K6B2
	/* Pick the CSIS channel matching the configured sensor lane. */
#if defined(CONFIG_S5K6B2_CSI_C)
	__set_mipi_csi_config(&s5p_mipi_csis0_default_data,
			s5k6b2.csi_data_align);
#elif defined(CONFIG_S5K6B2_CSI_D)
	__set_mipi_csi_config(&s5p_mipi_csis1_default_data,
			s5k6b2.csi_data_align);
#elif defined(CONFIG_S5K6B2_CSI_E)
	__set_mipi_csi_config(&s5p_mipi_csis2_default_data,
			s5k6b2.csi_data_align);
#endif
#endif

#ifdef CONFIG_VIDEO_EXYNOS_MIPI_CSIS
	s3c_set_platdata(&s5p_mipi_csis0_default_data,
			sizeof(s5p_mipi_csis0_default_data), &s5p_device_mipi_csis0);
	s3c_set_platdata(&s5p_mipi_csis1_default_data,
			sizeof(s5p_mipi_csis1_default_data), &s5p_device_mipi_csis1);
	s3c_set_platdata(&s5p_mipi_csis2_default_data,
			sizeof(s5p_mipi_csis2_default_data), &s5p_device_mipi_csis2);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE
	smdk5410_camera_gpio_cfg();
	smdk5410_set_camera_platdata();
	s3c_set_platdata(&exynos_flite0_default_data,
			sizeof(exynos_flite0_default_data), &exynos_device_flite0);
	s3c_set_platdata(&exynos_flite1_default_data,
			sizeof(exynos_flite1_default_data), &exynos_device_flite1);
	s3c_set_platdata(&exynos_flite2_default_data,
			sizeof(exynos_flite2_default_data), &exynos_device_flite2);
#endif
#if defined(CONFIG_VIDEO_EXYNOS_TV)
	dev_set_name(&s5p_device_hdmi.dev, "exynos5-hdmi");
	mxr_platdata.ip_ver = IP_VER_TV_5A_1;
	hdmi_platdata.ip_ver = IP_VER_TV_5A_1;
	s5p_tv_setup();

/* Below should be enabled after power domain is available */
#if 0
	s5p_device_hdmi.dev.parent = &exynos5_device_pd[PD_DISP1].dev;
	s5p_device_mixer.dev.parent = &exynos5_device_pd[PD_DISP1].dev;
#endif
#ifdef CONFIG_VIDEO_EXYNOS_HDMI_CEC
	s5p_hdmi_cec_set_platdata(&hdmi_cec_data);
#endif
	s3c_set_platdata(&mxr_platdata, sizeof(mxr_platdata), &s5p_device_mixer);
	s5p_hdmi_set_platdata(&hdmi_platdata);

	s3c_i2c2_set_platdata(NULL);
	i2c_register_board_info(2, i2c_devs2, ARRAY_SIZE(i2c_devs2));
#endif
#ifdef CONFIG_VIDEO_EXYNOS_GSCALER
	exynos5_gsc_set_ip_ver(IP_VER_GSC_5A);
	s3c_set_platdata(&exynos_gsc0_default_data, sizeof(exynos_gsc0_default_data),
			&exynos5_device_gsc0);
	s3c_set_platdata(&exynos_gsc1_default_data, sizeof(exynos_gsc1_default_data),
			&exynos5_device_gsc1);
	s3c_set_platdata(&exynos_gsc2_default_data, sizeof(exynos_gsc2_default_data),
			&exynos5_device_gsc2);
	s3c_set_platdata(&exynos_gsc3_default_data, sizeof(exynos_gsc3_default_data),
			&exynos5_device_gsc3);
#endif
#ifdef CONFIG_VIDEO_EXYNOS5_FIMC_IS
	/*
	 * NOTE(review): the fimc-is device is renamed before each
	 * clk_add_alias() call and restored afterwards — presumably so
	 * the gscl_wrap aliases are registered under the csis device
	 * names; verify against clk_add_alias() semantics.
	 */
	dev_set_name(&exynos5_device_fimc_is.dev, "s5p-mipi-csis.0");
	clk_add_alias("gscl_wrap0", FIMC_IS_MODULE_NAME, "gscl_wrap0",
			&exynos5_device_fimc_is.dev);
	dev_set_name(&exynos5_device_fimc_is.dev, "s5p-mipi-csis.1");
	clk_add_alias("gscl_wrap1", FIMC_IS_MODULE_NAME, "gscl_wrap1",
			&exynos5_device_fimc_is.dev);
	dev_set_name(&exynos5_device_fimc_is.dev, "s5p-mipi-csis.2");
	clk_add_alias("gscl_wrap2", FIMC_IS_MODULE_NAME, "gscl_wrap2",
			&exynos5_device_fimc_is.dev);
	dev_set_name(&exynos5_device_fimc_is.dev, FIMC_IS_MODULE_NAME);

	exynos5_fimc_is_data.gpio_info = &gpio_smdk5410;
	exynos5_fimc_is_set_platdata(&exynos5_fimc_is_data);

	if (!exynos_spi_cfg_cs(spi3_csi[0].line, 3)) {
		s3c64xx_spi3_set_platdata(&s3c64xx_spi3_pdata,
			EXYNOS_SPI_SRCCLK_SCLK, ARRAY_SIZE(spi3_csi));

		spi_register_board_info(spi3_board_info,
			ARRAY_SIZE(spi3_board_info));
	} else {
		pr_err("%s: Error requesting gpio for SPI-CH1 CS\n", __func__);
	}
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMG2D
	s5p_fimg2d_set_platdata(&fimg2d_data);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_JPEG
	exynos5_jpeg_fimp_setup_clock(&s5p_device_jpeg.dev, 166500000);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_JPEG_HX
	exynos5_jpeg_hx_setup_clock(&exynos5_device_jpeg_hx.dev, 300000000);
#endif
}