/**
 * watchdog_register_device() - register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device with the kernel so that the
 * watchdog timer can be accessed from userspace.
 *
 * A zero is returned on success and a negative errno code for
 * failure.
 */
int watchdog_register_device(struct watchdog_device *wdd)
{
	int ret, id, devno;

	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
		return -EINVAL;

	/* Mandatory operations need to be supported */
	if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
		return -EINVAL;

	watchdog_check_min_max_timeout(wdd);

	/*
	 * Note: now that all watchdog_device data has been verified, we
	 * will not check this anymore in other functions. If data gets
	 * corrupted in a later stage then we expect a kernel panic!
	 */
	mutex_init(&wdd->lock);

	id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
	if (id < 0)
		return id;
	wdd->id = id;

	ret = watchdog_dev_register(wdd);
	if (ret) {
		ida_simple_remove(&watchdog_ida, id);
		if (!(id == 0 && ret == -EBUSY))
			return ret;

		/* Retry in case a legacy watchdog module exists */
		id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
		if (id < 0)
			return id;
		wdd->id = id;

		ret = watchdog_dev_register(wdd);
		if (ret) {
			ida_simple_remove(&watchdog_ida, id);
			return ret;
		}
	}

	devno = wdd->cdev.dev;
	wdd->dev = device_create(watchdog_class, wdd->parent, devno,
				 NULL, "watchdog%d", wdd->id);
	if (IS_ERR(wdd->dev)) {
		watchdog_dev_unregister(wdd);
		ida_simple_remove(&watchdog_ida, id);
		ret = PTR_ERR(wdd->dev);
		return ret;
	}

	return 0;
}
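The kernel-doc above describes the registration entry point; a minimal usage sketch follows. This is a hypothetical driver (the foo_wdt_* names are made up for illustration, not from the source above): it fills in the mandatory start/stop ops that watchdog_register_device() checks for, then registers the device.

/* Sketch: minimal watchdog driver registering with the watchdog core. */
static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* poke the hardware to start the timer */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* poke the hardware to stop the timer */
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo watchdog",
	.options  = WDIOF_KEEPALIVEPING,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,	/* mandatory */
	.stop  = foo_wdt_stop,	/* mandatory */
};

static struct watchdog_device foo_wdt_dev = {
	.info = &foo_wdt_info,
	.ops  = &foo_wdt_ops,
};

static int __init foo_wdt_init(void)
{
	return watchdog_register_device(&foo_wdt_dev);
}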
int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%ld",
		     (unsigned long)trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
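A minimal caller sketch for the function above (hypothetical driver, assuming the iio_trigger_alloc(fmt, ...) allocator of this era): allocate a trigger, register it, and free it again if registration fails.

/* Sketch: allocating and registering an IIO trigger. */
static int foo_setup_trigger(struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
				 indio_dev->id);
	if (!trig)
		return -ENOMEM;

	ret = iio_trigger_register(trig);
	if (ret) {
		iio_trigger_free(trig);
		return ret;
	}

	return 0;
}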
static int henry_close(struct inode *inode, struct file *file)
{
	struct henry_session *session = file->private_data;
	struct henry_drv *drv = session->drv;

	/* Do not release resources in debug mode */
	if (drv->debug_mode)
		return 0;

	mutex_lock(&drv->mutex);
	ida_simple_remove(&drv->sess_ida, session->id);
	drv->num_exist--;
	henry_dbgfs_del_session(session);
	devm_kfree(drv->dev, session);
	mutex_unlock(&drv->mutex);

	dev_info(drv->dev, "henry_drv_close\n");
	return 0;
}
void unregister_virtio_device(struct virtio_device *dev)
{
	int index = dev->index; /* save for after device release */

	device_unregister(&dev->dev);
	ida_simple_remove(&virtio_index_ida, index);
}
/**
 * kdbus_node_unref() - Drop object reference
 * @node: node to drop reference to (or NULL)
 *
 * This drops an object reference to @node. You must not access the node if you
 * no longer own a reference.
 * If the ref-count drops to 0, the object will be destroyed (->free_cb will be
 * called).
 *
 * If you linked or activated the node, you must deactivate the node before you
 * drop your last reference! If you didn't link or activate the node, you can
 * drop any reference you want.
 *
 * Note that this calls into ->free_cb() and thus _might_ sleep. The ->free_cb()
 * callbacks must not acquire any outer locks, though. So you can safely drop
 * references while holding locks.
 *
 * If @node is NULL, this is a no-op.
 *
 * Return: This always returns NULL
 */
struct kdbus_node *kdbus_node_unref(struct kdbus_node *node)
{
	if (node && atomic_dec_and_test(&node->refcnt)) {
		struct kdbus_node safe = *node;

		WARN_ON(atomic_read(&node->active) != KDBUS_NODE_DRAINED);
		WARN_ON(!RB_EMPTY_NODE(&node->rb));

		if (node->free_cb)
			node->free_cb(node);
		if (safe.id > 0)
			ida_simple_remove(&kdbus_node_ida, safe.id);
		kfree(safe.name);

		/*
		 * kdbusfs relies on the parent to be available even after the
		 * node was deactivated and unlinked. Therefore, we pin it
		 * until a node is destroyed.
		 */
		kdbus_node_unref(safe.parent);
	}

	return NULL;
}
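A short caller sketch for the reference rules the kernel-doc spells out (assuming kdbus_node_ref() as the matching acquire helper in the kdbus node API): take a reference for the duration of use, then drop it. Because kdbus_node_unref() always returns NULL, assigning its return value clears the caller's pointer in one step.

/* Sketch: typical acquire/release pairing around node use. */
static void foo_use_node(struct kdbus_node *node)
{
	struct kdbus_node *ref = kdbus_node_ref(node);

	/* ... use the node while holding the reference ... */

	ref = kdbus_node_unref(ref);	/* ref is now NULL */
}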
static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	kfree(q->flush_rq);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static int w1_ds2781_add_slave(struct w1_slave *sl)
{
	int ret;
	int id;
	struct platform_device *pdev;

	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		goto noid;
	}

	pdev = platform_device_alloc("ds2781-battery", id);
	if (!pdev) {
		ret = -ENOMEM;
		goto pdev_alloc_failed;
	}
	pdev->dev.parent = &sl->dev;

	ret = platform_device_add(pdev);
	if (ret)
		goto pdev_add_failed;

	dev_set_drvdata(&sl->dev, pdev);

	return 0;

pdev_add_failed:
	platform_device_put(pdev);
pdev_alloc_failed:
	ida_simple_remove(&bat_ida, id);
noid:
	return ret;
}
static void kp2000_pcie_remove(struct pci_dev *pdev)
{
	struct kp2000_device *pcard = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "kp2000_pcie_remove(pdev=%p)\n", pdev);

	if (pcard == NULL)
		return;

	mutex_lock(&pcard->sem);
	kp2000_remove_cores(pcard);
	mfd_remove_devices(PCARD_TO_DEV(pcard));
	misc_deregister(&pcard->miscdev);
	sysfs_remove_files(&pdev->dev.kobj, kp_attr_list);
	free_irq(pcard->pdev->irq, pcard);
	pci_disable_msi(pcard->pdev);
	if (pcard->dma_bar_base != NULL) {
		iounmap(pcard->dma_bar_base);
		pci_release_region(pdev, DMA_BAR);
		pcard->dma_bar_base = NULL;
	}
	if (pcard->regs_bar_base != NULL) {
		iounmap(pcard->regs_bar_base);
		pci_release_region(pdev, REG_BAR);
		pcard->regs_bar_base = NULL;
	}
	pci_disable_device(pcard->pdev);
	pci_set_drvdata(pdev, NULL);
	mutex_unlock(&pcard->sem);

	ida_simple_remove(&card_num_ida, pcard->card_num);
	kfree(pcard);
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_throtl_exit(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_throtl_release(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	kmem_cache_free(blk_requestq_cachep, q);
}
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx =
		container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);
	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
/**
 * of_devfreq_cooling_register_power() - Register devfreq cooling device,
 *                                       with OF and power information.
 * @np: Pointer to OF device_node.
 * @df: Pointer to devfreq device.
 * @dfc_power: Pointer to devfreq_cooling_power.
 *
 * Register a devfreq cooling device. The available OPPs must be
 * registered on the device.
 *
 * If @dfc_power is provided, the cooling device is registered with the
 * power extensions. For the power extensions to work correctly,
 * devfreq should use the simple_ondemand governor; other governors
 * are not currently supported.
 */
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
				  struct devfreq_cooling_power *dfc_power)
{
	struct thermal_cooling_device *cdev;
	struct devfreq_cooling_device *dfc;
	char dev_name[THERMAL_NAME_LENGTH];
	int err;

	dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
	if (!dfc)
		return ERR_PTR(-ENOMEM);

	dfc->devfreq = df;

	if (dfc_power) {
		dfc->power_ops = dfc_power;

		devfreq_cooling_ops.get_requested_power =
			devfreq_cooling_get_requested_power;
		devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
		devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
	}

	err = devfreq_cooling_gen_tables(dfc);
	if (err)
		goto free_dfc;

	err = ida_simple_get(&devfreq_ida, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto free_tables;
	dfc->id = err;

	snprintf(dev_name, sizeof(dev_name), "thermal-devfreq-%d", dfc->id);

	cdev = thermal_of_cooling_device_register(np, dev_name, dfc,
						  &devfreq_cooling_ops);
	if (IS_ERR(cdev)) {
		err = PTR_ERR(cdev);
		dev_err(df->dev.parent,
			"Failed to register devfreq cooling device (%d)\n",
			err);
		goto release_ida;
	}

	dfc->cdev = cdev;

	return cdev;

release_ida:
	ida_simple_remove(&devfreq_ida, dfc->id);
free_tables:
	kfree(dfc->power_table);
	kfree(dfc->freq_table);
free_dfc:
	kfree(dfc);

	return ERR_PTR(err);
}
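A minimal caller sketch for the registration function above (hypothetical driver; the foo_* names and the coefficient value are made up for illustration): register a devfreq cooling device for a device whose OPPs are already set up, supplying a power description via struct devfreq_cooling_power.

/* Sketch: registering a devfreq cooling device with power extensions. */
static struct devfreq_cooling_power foo_cooling_power = {
	.dyn_power_coeff = 120,	/* assumed dynamic-power coefficient */
};

static int foo_register_cooling(struct device *dev, struct devfreq *df)
{
	struct thermal_cooling_device *cdev;

	cdev = of_devfreq_cooling_register_power(dev->of_node, df,
						 &foo_cooling_power);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);

	/* keep cdev around for a later devfreq_cooling_unregister() */
	dev_set_drvdata(dev, cdev);
	return 0;
}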
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Flush ring with timeout of 1s */
	timeout = 1000;
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (timeout--);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Release IRQ */
	if (ring->irq_requested) {
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool, ring->cmpl_base,
			      ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool, ring->bd_base,
			      ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}
/**
 * mcb_release_bus() - Free a @mcb_bus
 * @bus: The @mcb_bus to release
 *
 * Release an allocated @mcb_bus from the system.
 */
void mcb_release_bus(struct mcb_bus *bus)
{
	mcb_devices_unregister(bus);
	ida_simple_remove(&mcb_ida, bus->bus_nr);
	kfree(bus);
}
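A short pairing sketch (hypothetical carrier driver, assuming the later mcb_alloc_bus(struct device *) form that returns an ERR_PTR): the release above undoes mcb_alloc_bus(), which allocated bus_nr from mcb_ida when the bus was created.

/* Sketch: bus lifetime in a carrier driver. */
static int foo_carrier_probe(struct device *dev)
{
	struct mcb_bus *bus;

	bus = mcb_alloc_bus(dev);
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	/* ... parse the chameleon table and add MCB devices ... */

	dev_set_drvdata(dev, bus);
	return 0;
}

static void foo_carrier_remove(struct device *dev)
{
	mcb_release_bus(dev_get_drvdata(dev));
}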
static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}
int ipack_bus_unregister(struct ipack_bus_device *bus)
{
	bus_for_each_dev(&ipack_bus_type, NULL, bus,
			 ipack_unregister_bus_member);
	ida_simple_remove(&ipack_ida, bus->bus_nr);
	kfree(bus);
	return 0;
}
static void w1_ds2781_remove_slave(struct w1_slave *sl)
{
	struct platform_device *pdev = dev_get_drvdata(&sl->dev);
	int id = pdev->id;

	platform_device_unregister(pdev);
	ida_simple_remove(&bat_ida, id);
}
static void mcb_free_bus(struct device *dev)
{
	struct mcb_bus *bus = to_mcb_bus(dev);

	put_device(bus->carrier);
	ida_simple_remove(&mcb_ida, bus->bus_nr);
	kfree(bus);
}
static void delete_ptp_clock(struct posix_clock *pc)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	mutex_destroy(&ptp->tsevq_mux);
	ida_simple_remove(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
static void w1_ds2760_remove_slave(struct w1_slave *sl)
{
	struct platform_device *pdev = dev_get_drvdata(&sl->dev);
	int id = pdev->id;

	platform_device_unregister(pdev);
	ida_simple_remove(&bat_ida, id);
	sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
}
static int gpio_exar_remove(struct platform_device *pdev)
{
	struct exar_gpio_chip *exar_gpio = platform_get_drvdata(pdev);

	ida_simple_remove(&ida_index, exar_gpio->index);
	mutex_destroy(&exar_gpio->lock);

	return 0;
}
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
static void gb_hd_release(struct device *dev)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	if (hd->svc)
		gb_svc_put(hd->svc);
	ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
	ida_destroy(&hd->cport_id_map);
	kfree(hd);
}
static void xpad_led_disconnect(struct usb_xpad *xpad)
{
	struct xpad_led *xpad_led = xpad->led;

	if (xpad_led) {
		led_classdev_unregister(&xpad_led->led_cdev);
		ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
		kfree(xpad_led);
	}
}
static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}
void unregister_virtio_device(struct virtio_device *dev)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)
	int index = dev->index; /* save for after device release */
#endif

	device_unregister(&dev->dev);
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)
	ida_simple_remove(&virtio_index_ida, index);
#endif
}
/**
 * hwmon_device_unregister - removes the previously registered class device
 *
 * @dev: the class device to destroy
 */
void hwmon_device_unregister(struct device *dev)
{
	int id;

	if (likely(sscanf(dev_name(dev), HWMON_ID_FORMAT, &id) == 1)) {
		device_unregister(dev);
		ida_simple_remove(&hwmon_ida, id);
	} else
		dev_dbg(dev->parent,
			"hwmon_device_unregister() failed: bad class ID!\n");
}
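A minimal pairing sketch (hypothetical driver, using the legacy hwmon_device_register(struct device *) API): the unregister call above parses back the "hwmonN" id that hwmon_device_register() allocated from hwmon_ida when the class device was created.

/* Sketch: register/unregister pairing for a legacy hwmon driver. */
static struct device *foo_hwmon;

static int foo_probe(struct device *dev)
{
	foo_hwmon = hwmon_device_register(dev);
	return PTR_ERR_OR_ZERO(foo_hwmon);
}

static void foo_remove(struct device *dev)
{
	hwmon_device_unregister(foo_hwmon);
}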
static void nd_pfn_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_pfn *nd_pfn = to_nd_pfn(dev);

	dev_dbg(dev, "%s\n", __func__);
	nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_pfn);
}