/**
 * watchdog_register_device() - register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device with the kernel so that the
 * watchdog timer can be accessed from userspace.
 *
 * A zero is returned on success and a negative errno code for
 * failure.
 */
int watchdog_register_device(struct watchdog_device *wdd)
{
	int ret, id, devno;

	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
		return -EINVAL;

	/* Mandatory operations need to be supported */
	if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
		return -EINVAL;

	watchdog_check_min_max_timeout(wdd);

	/*
	 * Note: now that all watchdog_device data has been verified, we
	 * will not check this anymore in other functions. If data gets
	 * corrupted in a later stage then we expect a kernel panic!
	 */

	mutex_init(&wdd->lock);
	id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
	if (id < 0)
		return id;
	wdd->id = id;

	ret = watchdog_dev_register(wdd);
	if (ret) {
		ida_simple_remove(&watchdog_ida, id);
		if (!(id == 0 && ret == -EBUSY))
			return ret;

		/* Retry in case a legacy watchdog module exists */
		id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
		if (id < 0)
			return id;
		wdd->id = id;

		ret = watchdog_dev_register(wdd);
		if (ret) {
			ida_simple_remove(&watchdog_ida, id);
			return ret;
		}
	}

	devno = wdd->cdev.dev;
	wdd->dev = device_create(watchdog_class, wdd->parent, devno,
				 NULL, "watchdog%d", wdd->id);
	if (IS_ERR(wdd->dev)) {
		watchdog_dev_unregister(wdd);
		ida_simple_remove(&watchdog_ida, id);
		ret = PTR_ERR(wdd->dev);
		return ret;
	}

	return 0;
}
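/*
 * Hypothetical usage sketch (not part of the original source): a minimal
 * driver registering with the watchdog core above. The foo_* names and
 * timeout bounds are illustrative assumptions; ->info, ->ops, ->start and
 * ->stop are the mandatory fields the core checks before allocating an id.
 */
static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* kick the hardware here */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware here */
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo watchdog",
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
};

static struct watchdog_device foo_wdd = {
	.info        = &foo_wdt_info,
	.ops         = &foo_wdt_ops,
	.min_timeout = 1,
	.max_timeout = 60,
};

static int __init foo_wdt_init(void)
{
	/* returns 0 on success, negative errno on failure */
	return watchdog_register_device(&foo_wdd);
}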
static struct device *__nd_btt_create(struct nd_region *nd_region,
		unsigned long lbasize, u8 *uuid,
		struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt;
	struct device *dev;

	nd_btt = kzalloc(sizeof(*nd_btt), GFP_KERNEL);
	if (!nd_btt)
		return NULL;

	nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
	if (nd_btt->id < 0) {
		kfree(nd_btt);
		return NULL;
	}

	nd_btt->lbasize = lbasize;
	if (uuid)
		uuid = kmemdup(uuid, 16, GFP_KERNEL);
	nd_btt->uuid = uuid;
	dev = &nd_btt->dev;
	dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
	dev->parent = &nd_region->dev;
	dev->type = &nd_btt_device_type;
	dev->groups = nd_btt_attribute_groups;
	device_initialize(&nd_btt->dev);
	if (ndns && !__nd_btt_attach_ndns(nd_btt, ndns)) {
		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
				__func__, dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}
static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}
int register_virtio_device(struct virtio_device *dev)
{
	int err;

	dev->dev.bus = &virtio_bus;

	/* Assign a unique device index and hence name. */
	err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto out;

	dev->index = err;
	dev_set_name(&dev->dev, "virtio%u", dev->index);

	spin_lock_init(&dev->config_lock);
	dev->config_enabled = false;
	dev->config_change_pending = false;

	/* We always start by resetting the device, in case a previous
	 * driver messed it up.  This also tests that code path a little. */
	dev->config->reset(dev);

	/* Acknowledge that we've seen the device. */
	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	INIT_LIST_HEAD(&dev->vqs);

	/* device_register() causes the bus infrastructure to look for a
	 * matching driver. */
	err = device_register(&dev->dev);
out:
	if (err)
		add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
static int w1_ds2781_add_slave(struct w1_slave *sl)
{
	int ret;
	int id;
	struct platform_device *pdev;

	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		goto noid;
	}

	pdev = platform_device_alloc("ds2781-battery", id);
	if (!pdev) {
		ret = -ENOMEM;
		goto pdev_alloc_failed;
	}
	pdev->dev.parent = &sl->dev;

	ret = platform_device_add(pdev);
	if (ret)
		goto pdev_add_failed;

	dev_set_drvdata(&sl->dev, pdev);

	return 0;

pdev_add_failed:
	platform_device_put(pdev);
pdev_alloc_failed:
	ida_simple_remove(&bat_ida, id);
noid:
	return ret;
}
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
/**
 * of_devfreq_cooling_register_power() - Register devfreq cooling device,
 *                                       with OF and power information.
 * @np:	Pointer to OF device_node.
 * @df:	Pointer to devfreq device.
 * @dfc_power:	Pointer to devfreq_cooling_power.
 *
 * Register a devfreq cooling device.  The available OPPs must be
 * registered on the device.
 *
 * If @dfc_power is provided, the cooling device is registered with the
 * power extensions.  For the power extensions to work correctly,
 * devfreq should use the simple_ondemand governor; other governors
 * are not currently supported.
 */
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
				  struct devfreq_cooling_power *dfc_power)
{
	struct thermal_cooling_device *cdev;
	struct devfreq_cooling_device *dfc;
	char dev_name[THERMAL_NAME_LENGTH];
	int err;

	dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
	if (!dfc)
		return ERR_PTR(-ENOMEM);

	dfc->devfreq = df;

	if (dfc_power) {
		dfc->power_ops = dfc_power;

		devfreq_cooling_ops.get_requested_power =
			devfreq_cooling_get_requested_power;
		devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
		devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
	}

	err = devfreq_cooling_gen_tables(dfc);
	if (err)
		goto free_dfc;

	err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto free_tables;
	dfc->id = err;

	snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);

	cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
						  &devfreq_cooling_ops);
	if (IS_ERR(cdev)) {
		err = PTR_ERR(cdev);
		dev_err(df->dev.parent,
			"Failed to register devfreq cooling device (%d)\n",
			err);
		goto release_ida;
	}

	dfc->cdev = cdev;

	return cdev;

release_ida:
	ida_simple_remove(&devfreq_ida, dfc->id);
free_tables:
	kfree(dfc->power_table);
	kfree(dfc->freq_table);
free_dfc:
	kfree(dfc);

	return ERR_PTR(err);
}
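/*
 * Hypothetical usage sketch (an assumption, not from the original source):
 * a devfreq driver registering its cooling device after devfreq setup.
 * Passing NULL for the power information keeps the power extensions
 * disabled; foo_setup_cooling() and its arguments are illustrative.
 */
static int foo_setup_cooling(struct device *dev, struct devfreq *df)
{
	struct thermal_cooling_device *cdev;

	cdev = of_devfreq_cooling_register_power(dev->of_node, df, NULL);
	if (IS_ERR(cdev)) {
		dev_warn(dev, "cooling device registration failed\n");
		return PTR_ERR(cdev);
	}

	return 0;
}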
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}
struct i915_gem_context *
mock_context(struct drm_i915_private *i915,
	     const char *name)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	kref_init(&ctx->ref);
	INIT_LIST_HEAD(&ctx->link);
	ctx->i915 = i915;

	ctx->vma_lut.ht_bits = VMA_HT_BITS;
	ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
	ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
				  sizeof(*ctx->vma_lut.ht),
				  GFP_KERNEL);
	if (!ctx->vma_lut.ht)
		goto err_free;

	ret = ida_simple_get(&i915->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0)
		goto err_vma_ht;
	ctx->hw_id = ret;

	if (name) {
		ctx->name = kstrdup(name, GFP_KERNEL);
		if (!ctx->name)
			goto err_put;

		ctx->ppgtt = mock_ppgtt(i915, name);
		if (!ctx->ppgtt)
			goto err_put;
	}

	return ctx;

err_vma_ht:
	kvfree(ctx->vma_lut.ht);
err_free:
	kfree(ctx);
	return NULL;

err_put:
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return NULL;
}
static int henry_open(struct inode *inode, struct file *file)
{
	struct henry_drv *drv = container_of(inode->i_cdev,
					     struct henry_drv, cdev);
	struct henry_session *session;
	int ret;

#ifdef CONFIG_MEM_TEST
	mem_alloc_page();
	mem_kmalloc();
	mem_vmalloc();
#endif

	mutex_lock(&drv->mutex);
	if (drv->num_exist >= VIRTUAL_DEV_NUM) {
		ret = -EBUSY;
		goto open_fail;
	}

	session = devm_kzalloc(drv->dev, sizeof(struct henry_session),
			       GFP_KERNEL);
	if (!session) {
		ret = -ENOMEM;
		goto open_fail;
	}

	session->drv = drv;
	session->id = ida_simple_get(&drv->sess_ida, 0, 0, GFP_KERNEL);
	if (session->id < 0) {
		/* ida_simple_get() returns a negative errno on failure */
		ret = session->id;
		goto open_fail;
	}
	file->private_data = session;
	drv->num_exist++;
	henry_dbgfs_add_session(session);
	mutex_unlock(&drv->mutex);

	dev_info(drv->dev, "henry_drv_open\n");
	return 0;

open_fail:
	mutex_unlock(&drv->mutex);
	return ret;
}
static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
			   struct nouveau_backlight *bl)
{
	const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);

	if (nb < 0 || nb >= 100)
		return false;
	if (nb > 0)
		snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
	else
		snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight");
	bl->id = nb;
	return true;
}
static int xpad_led_probe(struct usb_xpad *xpad)
{
	struct xpad_led *led;
	struct led_classdev *led_cdev;
	int error;

	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX360W)
		return 0;

	xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
	if (!led)
		return -ENOMEM;

	xpad->pad_nr = ida_simple_get(&xpad_pad_seq, 0, 0, GFP_KERNEL);
	if (xpad->pad_nr < 0) {
		error = xpad->pad_nr;
		goto err_free_mem;
	}

	snprintf(led->name, sizeof(led->name), "xpad%d", xpad->pad_nr);
	led->xpad = xpad;

	led_cdev = &led->led_cdev;
	led_cdev->name = led->name;
	led_cdev->brightness_set = xpad_led_set;

	error = led_classdev_register(&xpad->udev->dev, led_cdev);
	if (error)
		goto err_free_id;

	if (xpad->xtype == XTYPE_XBOX360) {
		/*
		 * Light up the segment corresponding to controller
		 * number on wired devices. On wireless we'll do that
		 * when they respond to "presence" packet.
		 */
		xpad_identify_controller(xpad);
	}

	return 0;

err_free_id:
	ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
err_free_mem:
	kfree(led);
	xpad->led = NULL;
	return error;
}
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
{
	int end = dev->roce.reserved_gids.start +
		  dev->roce.reserved_gids.count;
	int index = 0;

	index = ida_simple_get(&dev->roce.reserved_gids.ida,
			       dev->roce.reserved_gids.start, end,
			       GFP_KERNEL);
	if (index < 0)
		return index;

	mlx5_core_dbg(dev, "Allocating reserved GID %u\n", index);
	*gid_index = index;
	return 0;
}
/**
 * hwmon_device_register - register w/ hwmon
 * @dev: the device to register
 *
 * hwmon_device_unregister() must be called when the device is no
 * longer needed.
 *
 * Returns the pointer to the new device.
 */
struct device *hwmon_device_register(struct device *dev)
{
	struct device *hwdev;
	int id;

	id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		return ERR_PTR(id);

	hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
			      HWMON_ID_FORMAT, id);
	if (IS_ERR(hwdev))
		ida_simple_remove(&hwmon_ida, id);
	return hwdev;
}
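/*
 * Hypothetical usage sketch (assumption): registering a bare hwmon device
 * from a probe routine. hwmon_device_register() hands back either a valid
 * device or an ERR_PTR, so the caller must use IS_ERR() rather than a
 * NULL check. foo_hwmon_probe is an illustrative name.
 */
static int foo_hwmon_probe(struct platform_device *pdev)
{
	struct device *hwdev;

	hwdev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(hwdev))
		return PTR_ERR(hwdev);

	platform_set_drvdata(pdev, hwdev);
	return 0;
}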
int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes map with that the hardware
	 * expects because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
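/*
 * Hypothetical usage sketch (assumption): how a connection manager might
 * follow the kernel-doc above: allocate a domain, fill in cm_ops, and
 * balance the allocation with tb_domain_put() on any later failure.
 * struct foo_cm and foo_cm_ops are illustrative and assumed to be
 * defined by the caller.
 */
static struct tb *foo_cm_domain_alloc(struct tb_nhi *nhi)
{
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct foo_cm));
	if (!tb)
		return NULL;

	tb->cm_ops = &foo_cm_ops;

	return tb;
}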
struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc, struct module *module)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->poison_list);
	init_waitqueue_head(&nvdimm_bus->probe_wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	mutex_init(&nvdimm_bus->reconfig_mutex);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->module = module;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.release = nvdimm_bus_release;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	rc = device_register(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		goto err;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	return nvdimm_bus;
err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	struct kernfs_node *kn;
	int ret;

	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	/*
	 * If the ino of the sysfs entry created for a kmem cache gets
	 * allocated from an ida layer, which is accounted to the memcg that
	 * owns the cache, the memcg will get pinned forever. So do not account
	 * ino ida allocations.
	 */
	ret = ida_simple_get(&root->ino_ida, 1, 0,
			     GFP_KERNEL | __GFP_NOACCOUNT);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree_const(name);
	return NULL;
}
/**
 * mcb_alloc_bus() - Allocate a new @mcb_bus
 *
 * Allocate a new @mcb_bus.
 */
struct mcb_bus *mcb_alloc_bus(void)
{
	struct mcb_bus *bus;
	int bus_nr;

	bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
	if (!bus)
		return NULL;

	bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
	if (bus_nr < 0) {
		kfree(bus);
		return ERR_PTR(bus_nr);
	}

	INIT_LIST_HEAD(&bus->children);
	bus->bus_nr = bus_nr;

	return bus;
}
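/*
 * Hypothetical usage sketch (assumption): a carrier driver allocating a
 * bus with the version above. Note the mixed error convention: kzalloc
 * failure yields NULL while an ida failure yields an ERR_PTR, so a
 * defensive caller checks both. foo_carrier_probe is illustrative.
 */
static int foo_carrier_probe(void)
{
	struct mcb_bus *bus;

	bus = mcb_alloc_bus();
	if (!bus)
		return -ENOMEM;
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	/* bus->bus_nr now holds the ida-assigned bus number */
	return 0;
}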
static int w1_ds2760_add_slave(struct w1_slave *sl)
{
	int ret;
	int id;
	struct platform_device *pdev;

	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		goto noid;
	}

	pdev = platform_device_alloc("ds2760-battery", id);
	if (!pdev) {
		ret = -ENOMEM;
		goto pdev_alloc_failed;
	}
	pdev->dev.parent = &sl->dev;

	ret = platform_device_add(pdev);
	if (ret)
		goto pdev_add_failed;

	ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
	if (ret)
		goto bin_attr_failed;

	dev_set_drvdata(&sl->dev, pdev);

	goto success;

bin_attr_failed:
	platform_device_del(pdev);
pdev_add_failed:
	platform_device_put(pdev);
pdev_alloc_failed:
	ida_simple_remove(&bat_ida, id);
noid:
success:
	return ret;
}
struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
					    const struct ipack_bus_ops *ops)
{
	int bus_nr;
	struct ipack_bus_device *bus;

	bus = kzalloc(sizeof(struct ipack_bus_device), GFP_KERNEL);
	if (!bus)
		return NULL;

	bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL);
	if (bus_nr < 0) {
		kfree(bus);
		return NULL;
	}

	bus->bus_nr = bus_nr;
	bus->parent = parent;
	bus->slots = slots;
	bus->ops = ops;
	return bus;
}
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	char *dup_name = NULL;
	struct kernfs_node *kn;
	int ret;

	if (!(flags & KERNFS_STATIC_NAME)) {
		name = dup_name = kstrdup(name, GFP_KERNEL);
		if (!name)
			return NULL;
	}

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree(dup_name);
	return NULL;
}
/*! 2017-01-07: studying */
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	char *dup_name = NULL;
	struct kernfs_node *kn;
	int ret;

	if (!(flags & KERNFS_STATIC_NAME)) {
		name = dup_name = kstrdup(name, GFP_KERNEL);
		if (!name)
			return NULL;
	}

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	/*! Get an ida id from root->ino_ida. */
	ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto err_out2;
	/*! Store the ida id just obtained. */
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, 0);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags | KERNFS_REMOVED;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree(dup_name);
	return NULL;
}
static struct device *__nd_pfn_create(struct nd_region *nd_region,
		u8 *uuid, enum nd_pfn_mode mode,
		struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	/* we can only create pages for contiguous ranges of pmem */
	if (!is_nd_pmem(&nd_region->dev))
		return NULL;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	nd_pfn->mode = mode;
	if (uuid)
		uuid = kmemdup(uuid, 16, GFP_KERNEL);
	nd_pfn->uuid = uuid;
	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->parent = &nd_region->dev;
	dev->type = &nd_pfn_device_type;
	dev->groups = nd_pfn_attribute_groups;
	device_initialize(&nd_pfn->dev);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
				__func__, dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}
int __iio_trigger_register(struct iio_trigger *trig_info,
			   struct module *this_mod)
{
	int ret;

	trig_info->owner = this_mod;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long) trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->groups = nd_pfn_attribute_groups;
	dev->type = &nd_pfn_device_type;
	dev->parent = &nd_region->dev;

	return nd_pfn;
}
/**
 * mcb_alloc_bus() - Allocate a new @mcb_bus
 *
 * Allocate a new @mcb_bus.
 */
struct mcb_bus *mcb_alloc_bus(struct device *carrier)
{
	struct mcb_bus *bus;
	int bus_nr;
	int rc;

	bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
	if (bus_nr < 0) {
		rc = bus_nr;
		goto err_free;
	}

	bus->bus_nr = bus_nr;
	bus->carrier = get_device(carrier);

	device_initialize(&bus->dev);
	bus->dev.parent = carrier;
	bus->dev.bus = &mcb_bus_type;
	bus->dev.type = &mcb_carrier_device_type;
	bus->dev.release = &mcb_free_bus;

	dev_set_name(&bus->dev, "mcb:%d", bus_nr);
	rc = device_add(&bus->dev);
	if (rc)
		goto err_put;

	return bus;

err_put:
	/* the release callback drops the carrier reference and the ida id */
	put_device(&bus->dev);
	return ERR_PTR(rc);

err_free:
	/* no carrier reference taken yet, a plain kfree is enough */
	kfree(bus);
	return ERR_PTR(rc);
}
static int gb_vibrator_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_vibrator_device *vib;
	struct device *dev;
	int retval;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
		return -ENODEV;

	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
	if (!vib)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  NULL);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto err_free_vib;
	}
	gb_connection_set_data(connection, vib);

	vib->connection = connection;

	greybus_set_drvdata(bundle, vib);

	retval = gb_connection_enable(connection);
	if (retval)
		goto err_connection_destroy;

	/*
	 * For now we create a device in sysfs for the vibrator, but odds are
	 * there is a "real" device somewhere in the kernel for this, but I
	 * can't find it at the moment...
	 */
	vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
	if (vib->minor < 0) {
		retval = vib->minor;
		goto err_connection_disable;
	}
	dev = device_create(&vibrator_class, &bundle->dev,
			    MKDEV(0, 0), vib, "vibrator%d", vib->minor);
	if (IS_ERR(dev)) {
		retval = -EINVAL;
		goto err_ida_remove;
	}
	vib->dev = dev;

	INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

err_ida_remove:
	ida_simple_remove(&minors, vib->minor);
err_connection_disable:
	gb_connection_disable(connection);
err_connection_destroy:
	gb_connection_destroy(connection);
err_free_vib:
	kfree(vib);

	return retval;
}
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->disable = ndbr_desc->disable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}