/**
 * devm_regulator_register_supply_alias - Resource managed
 * regulator_register_supply_alias()
 *
 * @dev: device that will be given as the regulator "consumer"
 * @id: Supply name or regulator ID
 * @alias_dev: device that should be used to lookup the supply
 * @alias_id: Supply name or regulator ID that should be used to lookup the
 * supply
 *
 * The supply alias will automatically be unregistered when the source
 * device is unbound.
 */
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
					 struct device *alias_dev,
					 const char *alias_id)
{
	struct regulator_supply_alias_match *match;
	int ret;

	match = devres_alloc(devm_regulator_destroy_supply_alias,
			     sizeof(struct regulator_supply_alias_match),
			     GFP_KERNEL);
	if (!match)
		return -ENOMEM;

	match->dev = dev;
	match->id = id;

	ret = regulator_register_supply_alias(dev, id, alias_dev, alias_id);
	if (ret < 0) {
		devres_free(match);
		return ret;
	}

	devres_add(dev, match);

	return 0;
}
struct dentry *devm_hello_create_file(struct device *dev, const char *name,
				      umode_t mode, struct dentry *parent,
				      void *data,
				      const struct file_operations *fops)
{
	struct dentry *pfile = NULL;
	void *ptr = NULL;

	if (!fops || !name || name[0] == '\0')
		return NULL;

	ptr = devres_alloc(devm_hello_release, 0, GFP_KERNEL);
	if (!ptr)
		return NULL;

	pfile = debugfs_create_file(name, mode, parent, data, fops);
	if (pfile)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pfile;
}
static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
					     int get_type)
{
	struct regulator **ptr, *regulator;

	ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	switch (get_type) {
	case NORMAL_GET:
		regulator = regulator_get(dev, id);
		break;
	case EXCLUSIVE_GET:
		regulator = regulator_get_exclusive(dev, id);
		break;
	case OPTIONAL_GET:
		regulator = regulator_get_optional(dev, id);
		break;
	default:
		regulator = ERR_PTR(-EINVAL);
	}

	if (!IS_ERR(regulator)) {
		*ptr = regulator;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regulator;
}
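/*
 * For context, a minimal sketch of the release callback that devres_alloc()
 * registers above. The callback is not part of this listing and the in-kernel
 * body may differ slightly; the point is the devres pattern: on unbind, devres
 * hands the stored data back and the callback undoes the original get.
 */
static void devm_regulator_release(struct device *dev, void *res)
{
	/* res points at the struct regulator * stored by _devm_regulator_get() */
	regulator_put(*(struct regulator **)res);
}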
struct stm_amba_bridge *stm_amba_bridge_create(struct device *dev,
					       void __iomem *base,
					       struct stm_amba_bridge_config *bus_config)
{
	struct stm_amba_bridge *plug;

	plug = devres_alloc(stm_amba_bridge_devres_release, sizeof(*plug),
			    GFP_KERNEL);
	if (!plug)
		return ERR_PTR(-ENOMEM);
	devres_add(dev, plug);

	plug->base = base;
	plug->dev = dev;
	plug->config = bus_config;
	plug->dentry = NULL;

	spin_lock(&dfs_lock);

	if (!dfs_dir) {
		dfs_dir = debugfs_create_dir("amba-stbus-bridge", NULL);
		if (IS_ERR(dfs_dir)) {
			dfs_dir = NULL;
			goto out;
		}
	}

	plug->dentry = debugfs_create_file(dev_name(dev), S_IFREG | S_IRUGO,
					   dfs_dir, plug,
					   STM_AMBA_BRIDGE_DEBUGFS_FOPS);
	if (IS_ERR(plug->dentry))
		plug->dentry = NULL;

out:
	atomic_inc(&ref_count);
	spin_unlock(&dfs_lock);

	return plug;
}
/**
 * of_regulator_match - extract multiple regulator init data from device tree.
 * @dev: device requesting the data
 * @node: parent device node of the regulators
 * @matches: match table for the regulators
 * @num_matches: number of entries in match table
 *
 * This function uses a match table specified by the regulator driver to
 * parse regulator init data from the device tree. @node is expected to
 * contain a set of child nodes, each providing the init data for one
 * regulator. The data parsed from a child node will be matched to a regulator
 * based on either the deprecated property regulator-compatible if present,
 * or otherwise the child node's name. Note that the match table is modified
 * in place and an additional of_node reference is taken for each matched
 * regulator.
 *
 * Returns the number of matches found or a negative error code on failure.
 */
int of_regulator_match(struct device *dev, struct device_node *node,
		       struct of_regulator_match *matches,
		       unsigned int num_matches)
{
	unsigned int count = 0;
	unsigned int i;
	const char *name;
	struct device_node *child;
	struct devm_of_regulator_matches *devm_matches;

	if (!dev || !node)
		return -EINVAL;

	devm_matches = devres_alloc(devm_of_regulator_put_matches,
				    sizeof(struct devm_of_regulator_matches),
				    GFP_KERNEL);
	if (!devm_matches)
		return -ENOMEM;

	devm_matches->matches = matches;
	devm_matches->num_matches = num_matches;

	devres_add(dev, devm_matches);

	for (i = 0; i < num_matches; i++) {
		struct of_regulator_match *match = &matches[i];

		match->init_data = NULL;
		match->of_node = NULL;
	}

	for_each_child_of_node(node, child) {
		name = of_get_property(child, "regulator-compatible", NULL);
		if (!name)
			name = child->name;

		for (i = 0; i < num_matches; i++) {
			struct of_regulator_match *match = &matches[i];

			if (match->of_node)
				continue;

			if (strcmp(match->name, name))
				continue;

			match->init_data = of_get_regulator_init_data(dev, child,
								      match->desc);
			if (!match->init_data) {
				dev_err(dev, "failed to parse DT for regulator %pOFn\n",
					child);
				of_node_put(child);
				return -EINVAL;
			}
			match->of_node = of_node_get(child);
			count++;
			break;
		}
	}

	return count;
}
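/*
 * Hypothetical usage sketch (the foo_* names are illustrative, not from the
 * original): the driver declares a match table keyed by child-node name and
 * lets of_regulator_match() fill in init_data/of_node for each entry found.
 */
static struct of_regulator_match foo_reg_matches[] = {
	{ .name = "ldo1" },
	{ .name = "ldo2" },
};

static int foo_parse_dt(struct platform_device *pdev, struct device_node *np)
{
	int ret;

	ret = of_regulator_match(&pdev->dev, np, foo_reg_matches,
				 ARRAY_SIZE(foo_reg_matches));
	if (ret < 0)
		return ret;

	/* foo_reg_matches[i].init_data / .of_node are now populated (or NULL) */
	return 0;
}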
/**
 * input_register_polled_device - register polled device
 * @dev: device to register
 *
 * The function registers a previously initialized polled input device
 * with the input layer. The device should be allocated with a call to
 * input_allocate_polled_device(). Callers should also set up the poll()
 * method and the capabilities (id, name, phys, bits) of the
 * corresponding input_dev structure.
 */
int input_register_polled_device(struct input_polled_dev *dev)
{
	struct input_polled_devres *devres = NULL;
	struct input_dev *input = dev->input;
	int error;

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_polldev_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->polldev = dev;
	}

	input_set_drvdata(input, dev);
	INIT_DELAYED_WORK(&dev->work, input_polled_device_work);

	if (!dev->poll_interval)
		dev->poll_interval = 500;
	if (!dev->poll_interval_max)
		dev->poll_interval_max = dev->poll_interval;

	input->open = input_open_polled_device;
	input->close = input_close_polled_device;

	input->dev.groups = input_polldev_attribute_groups;

	error = input_register_device(input);
	if (error) {
		devres_free(devres);
		return error;
	}

	/*
	 * Take extra reference to the underlying input device so
	 * that it survives call to input_unregister_polled_device()
	 * and is deleted only after input_free_polled_device()
	 * has been invoked. This is needed to ease task of freeing
	 * sparse keymaps.
	 */
	input_get_device(input);

	if (dev->devres_managed) {
		dev_dbg(input->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&input->dev));
		devres_add(input->dev.parent, devres);
	}

	return 0;
}
struct clk *devm_clk_get(struct device *dev, const char *id)
{
	struct clk **ptr, *clk;

	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	clk = clk_get(dev, id);
	if (!IS_ERR(clk)) {
		*ptr = clk;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return clk;
}
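/*
 * Hypothetical consumer sketch: a probe() that takes a managed clock
 * reference and never calls clk_put() itself. "foo" and the "bus" clock
 * name are illustrative, not from the original.
 */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* no clk_put() in remove(): the devres release calls it on unbind */
	return 0;
}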
/**
 * devm_iio_kfifo_allocate - Resource-managed iio_kfifo_allocate()
 * @dev: Device to allocate kfifo buffer for
 *
 * RETURNS:
 * Pointer to allocated iio_buffer on success, NULL on failure.
 */
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
{
	struct iio_buffer **ptr, *r;

	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	r = iio_kfifo_allocate();
	if (r) {
		*ptr = r;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return r;
}
/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_wc(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
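/*
 * Sketch of the shared release callback used by the devm_ioremap_*()
 * variants in this listing (assumed body, not shown in the original): the
 * stored cookie is simply handed back to iounmap() on unbind.
 */
static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}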
void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
struct kobject *devm_kobject_create_and_add(struct device *dev,
					    const char *name,
					    struct kobject *parent)
{
	struct kobject **ptr, *kobj;

	ptr = devres_alloc(devm_kobject_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	/* Use the caller-supplied name and parent rather than hard-coded values. */
	kobj = kobject_create_and_add(name, parent);
	if (kobj) {
		*ptr = kobj;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return kobj;
}
/**
 * devm_snd_soc_register_card - resource managed card registration
 * @dev: Device used to manage card
 * @card: Card to register
 *
 * Register a card with automatic unregistration when the device is
 * unregistered.
 */
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
{
	struct snd_soc_card **ptr;
	int ret;

	ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_soc_register_card(card);
	if (ret == 0) {
		*ptr = card;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
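/*
 * Hypothetical machine-driver probe built on the helper above. foo_card is
 * illustrative only; a real card would fill in its name, DAI links, etc.
 */
static struct snd_soc_card foo_card;

static int foo_audio_probe(struct platform_device *pdev)
{
	foo_card.dev = &pdev->dev;

	/* the card is unregistered automatically when the device is unbound */
	return devm_snd_soc_register_card(&pdev->dev, &foo_card);
}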
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id)
{
	struct clk **ptr, *clk;

	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	clk = of_clk_get_by_name(np, con_id);
	if (!IS_ERR(clk)) {
		*ptr = clk;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return clk;
}
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release,
			   sizeof(struct fw_name_devm) + strlen(name) + 1,
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;

	fwn->magic = (unsigned long)&fw_cache;
	strcpy(fwn->name, name);
	devres_add(dev, fwn);

	return 0;
}
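/*
 * Assumed layout behind the allocation above (the struct is not shown in this
 * listing): the firmware name is stored inline at the end of the devres node,
 * which is why the size passed to devres_alloc() includes strlen(name) + 1.
 */
struct fw_name_devm {
	unsigned long magic;
	char name[];
};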
static int snd_devm_add_child(struct device *dev, struct device *child)
{
	struct device **dr;
	int ret;

	dr = devres_alloc(snd_devm_unregister_child, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	ret = device_add(child);
	if (ret) {
		devres_free(dr);
		return ret;
	}

	*dr = child;
	devres_add(dev, dr);

	return 0;
}
/**
 * devm_ioremap_exec_nocache - Managed ioremap_exec_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_exec_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_exec_nocache(struct device *dev,
					resource_size_t offset,
					unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_exec_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}
/**
 * devm_snd_soc_register_platform - resource managed platform registration
 * @dev: Device used to manage platform
 * @platform_drv: platform to register
 *
 * Register a platform driver with automatic unregistration when the device is
 * unregistered.
 */
int devm_snd_soc_register_platform(struct device *dev,
				   const struct snd_soc_platform_driver *platform_drv)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_platform_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_soc_register_platform(dev, platform_drv);
	if (ret == 0) {
		*ptr = dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
{
	unsigned *dr;
	int rc;

	dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = gpio_request(gpio, label);
	if (rc) {
		devres_free(dr);
		return rc;
	}

	*dr = gpio;
	devres_add(dev, dr);

	return 0;
}
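/*
 * Matching release callback (sketch, not shown in the original): the devres
 * node holds just the GPIO number, so releasing it is a single gpio_free().
 */
static void devm_gpio_release(struct device *dev, void *res)
{
	gpio_free(*(unsigned *)res);
}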
static int bcm2835_devm_add_vchi_ctx(struct device *dev)
{
	struct bcm2835_vchi_ctx *vchi_ctx;
	int ret;

	vchi_ctx = devres_alloc(bcm2835_devm_free_vchi_ctx, sizeof(*vchi_ctx),
				GFP_KERNEL);
	if (!vchi_ctx)
		return -ENOMEM;

	ret = bcm2835_new_vchi_ctx(dev, vchi_ctx);
	if (ret) {
		devres_free(vchi_ctx);
		return ret;
	}

	devres_add(dev, vchi_ctx);

	return 0;
}
/**
 * devm_snd_dmaengine_pcm_register - resource managed dmaengine PCM registration
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 *
 * Register a dmaengine based PCM device with automatic unregistration when the
 * device is unregistered.
 */
int devm_snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_dmaengine_pcm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_dmaengine_pcm_register(dev, config, flags);
	if (ret == 0) {
		*ptr = dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
{
	struct notifier_block **rcnb;
	int ret;

	rcnb = devres_alloc(devm_unregister_reboot_notifier,
			    sizeof(*rcnb), GFP_KERNEL);
	if (!rcnb)
		return -ENOMEM;

	ret = register_reboot_notifier(nb);
	if (!ret) {
		*rcnb = nb;
		devres_add(dev, rcnb);
	} else {
		devres_free(rcnb);
	}

	return ret;
}
/**
 * devm_irq_sim_init - Initialize the interrupt simulator for a managed device.
 *
 * @dev: Device to initialize the simulator object for.
 * @sim: The interrupt simulator object to initialize.
 * @num_irqs: Number of interrupts to allocate
 *
 * Returns 0 on success and a negative error number on failure.
 */
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
		      unsigned int num_irqs)
{
	struct irq_sim_devres *dr;
	int rv;

	dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rv = irq_sim_init(sim, num_irqs);
	if (rv) {
		devres_free(dr);
		return rv;
	}

	dr->sim = sim;
	devres_add(dev, dr);

	return 0;
}
struct dentry *devm_hello_create_u32(struct device *dev, const char *name,
				     umode_t mode, struct dentry *parent,
				     u32 *pvalue)
{
	struct dentry *pu32 = NULL;
	void *ptr = NULL;

	if (!pvalue || !name || name[0] == '\0')
		return NULL;

	ptr = devres_alloc(devm_hello_release, 0, GFP_KERNEL);
	if (!ptr)
		return NULL;

	pu32 = debugfs_create_u32(name, mode, parent, pvalue);
	if (pu32)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pu32;
}
struct stm_device_state *devm_stm_device_init(struct device *dev,
					      struct stm_device_config *config)
{
	struct stm_device_state *state;

	/* Validate the arguments before config is dereferenced below. */
	BUG_ON(!dev);
	BUG_ON(!config);

	state = devres_alloc(stm_device_devres_exit,
			     sizeof(*state) +
			     sizeof(*state->sysconf_fields) * config->sysconfs_num,
			     GFP_KERNEL);
	if (state) {
		if (__stm_device_init(state, config, dev) == 0) {
			devres_add(dev, state);
		} else {
			devres_free(state);
			state = NULL;
		}
	}

	return state;
}
/**
 * devm_snd_soc_register_component - resource managed component registration
 * @dev: Device used to manage component
 * @cmpnt_drv: Component driver
 * @dai_drv: DAI driver
 * @num_dai: Number of DAIs to register
 *
 * Register a component with automatic unregistration when the device is
 * unregistered.
 */
int devm_snd_soc_register_component(struct device *dev,
				    const struct snd_soc_component_driver *cmpnt_drv,
				    struct snd_soc_dai_driver *dai_drv,
				    int num_dai)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai);
	if (ret == 0) {
		*ptr = dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
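/*
 * Note the changed error convention in this variant: callers must test the
 * return value with IS_ERR() rather than for NULL. Hypothetical usage sketch
 * (foo_map_region and the MEMREMAP_WB choice are illustrative only):
 */
static void *foo_map_region(struct device *dev, struct resource *res)
{
	void *va;

	va = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
	if (IS_ERR(va))
		return NULL;	/* a caller could instead propagate PTR_ERR(va) */

	return va;
}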
struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
					   unsigned int txqs, unsigned int rxqs)
{
	struct net_device **dr;
	struct net_device *netdev;

	dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
	if (!netdev) {
		devres_free(dr);
		return NULL;
	}

	*dr = netdev;
	devres_add(dev, dr);

	return netdev;
}
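/*
 * Sketch of the release callback registered above (assumed body): on unbind
 * the stored net_device pointer is handed to free_netdev(). Unregistering a
 * netdev that the driver registered remains the driver's responsibility.
 */
static void devm_free_netdev(struct device *dev, void *res)
{
	free_netdev(*(struct net_device **)res);
}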
/**
 * devm_led_classdev_register - resource managed led_classdev_register()
 * @parent: parent device of the LED device
 * @led_cdev: the led_classdev structure for this device.
 */
int devm_led_classdev_register(struct device *parent,
			       struct led_classdev *led_cdev)
{
	struct led_classdev **dr;
	int rc;

	dr = devres_alloc(devm_led_classdev_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = led_classdev_register(parent, led_cdev);
	if (rc) {
		devres_free(dr);
		return rc;
	}

	*dr = led_cdev;
	devres_add(parent, dr);

	return 0;
}
/**
 * devm_regulator_register - Resource managed regulator_register()
 * @dev: device managing the resource
 * @regulator_desc: regulator to register
 * @config: runtime configuration for regulator
 *
 * Called by regulator drivers to register a regulator. Returns a
 * valid pointer to struct regulator_dev on success or an ERR_PTR() on
 * error. The regulator will automatically be released when the device
 * is unbound.
 */
struct regulator_dev *devm_regulator_register(struct device *dev,
					      const struct regulator_desc *regulator_desc,
					      const struct regulator_config *config)
{
	struct regulator_dev **ptr, *rdev;

	ptr = devres_alloc(devm_rdev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rdev = regulator_register(regulator_desc, config);
	if (!IS_ERR(rdev)) {
		*ptr = rdev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return rdev;
}
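/*
 * Hypothetical regulator-driver probe built on the helper above. foo_desc is
 * illustrative only; a real driver also fills in ops, supply names, a voltage
 * table, and so on.
 */
static const struct regulator_desc foo_desc = {
	.name = "foo-ldo",
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
};

static int foo_regulator_probe(struct platform_device *pdev)
{
	struct regulator_config cfg = { };
	struct regulator_dev *rdev;

	cfg.dev = &pdev->dev;

	rdev = devm_regulator_register(&pdev->dev, &foo_desc, &cfg);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	/* no regulator_unregister() in remove(): devres handles it on unbind */
	return 0;
}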