Example no. 1
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claimed;

	device_lock(&attach->dev);
	claimed = __nd_attach_ndns(dev, attach, _ndns);
	device_unlock(&attach->dev);
	return claimed;
}
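All three examples delegate the actual claim to __nd_attach_ndns(), which is not shown here. A minimal sketch of what such a helper might look like, assuming the namespace records its claimant in ndns->claim (as the dev_dbg messages in the later examples suggest) and that the caller already provides the required locking:

static bool __nd_attach_ndns(struct device *dev,
		struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	/* hypothetical sketch, not the reference implementation */
	if (attach->claim)
		return false;		/* already claimed by another device */
	attach->claim = dev;		/* record the new claimant */
	*_ndns = attach;		/* hand the namespace back to the caller */
	get_device(&attach->dev);	/* pin the namespace while it is claimed */
	return true;
}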
Example no. 2
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns)
{
	struct device *dev;

	if (!nd_pfn)
		return NULL;

	nd_pfn->mode = PFN_MODE_NONE;
	nd_pfn->align = PFN_DEFAULT_ALIGNMENT;
	dev = &nd_pfn->dev;
	device_initialize(&nd_pfn->dev);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
				__func__, dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}
Example no. 3
static struct device *__nd_pfn_create(struct nd_region *nd_region,
		u8 *uuid, enum nd_pfn_mode mode,
		struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn;
	struct device *dev;

	/* we can only create pages for contiguous ranges of pmem */
	if (!is_nd_pmem(&nd_region->dev))
		return NULL;

	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
	if (!nd_pfn)
		return NULL;

	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_pfn);
		return NULL;
	}

	nd_pfn->mode = mode;
	if (uuid)
		uuid = kmemdup(uuid, 16, GFP_KERNEL);
	nd_pfn->uuid = uuid;
	dev = &nd_pfn->dev;
	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
	dev->parent = &nd_region->dev;
	dev->type = &nd_pfn_device_type;
	dev->groups = nd_pfn_attribute_groups;
	device_initialize(&nd_pfn->dev);
	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
				__func__, dev_name(ndns->claim));
		put_device(dev);
		return NULL;
	}
	return dev;
}