Code example #1
/**
 * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
 * table
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: search key
 * @vid: VLAN identifier
 *
 * Return: the dat_entry if found, NULL otherwise.
 */
static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
			   unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
	struct batadv_hashtable *hash = bat_priv->dat.hash;
	u32 index;

	if (!hash)
		return NULL;

	to_find.ip = ip;
	to_find.vid = vid;

	index = batadv_hash_dat(&to_find, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
		if (dat_entry->ip != ip)
			continue;

		if (!kref_get_unless_zero(&dat_entry->refcount))
			continue;

		dat_entry_tmp = dat_entry;
		break;
	}
	rcu_read_unlock();

	return dat_entry_tmp;
}
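Most of the examples that follow repeat the pattern shown here: locate the object under RCU (or a lock), then try to take a reference with kref_get_unless_zero() and treat failure as "not found", since a refcount that has already reached zero means the object is being torn down. A minimal sketch of that pattern, assuming an RCU-protected hlist bucket and deferred freeing; the names my_entry, my_entry_release and my_entry_find are illustrative, not taken from any of the projects listed here.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_entry {
	u32 key;
	struct hlist_node node;		/* linked into an RCU-protected hlist */
	struct kref refcount;
	struct rcu_head rcu;
};

/* Final release: the entry is assumed to have been unlinked from the list
 * (under the list's write-side lock) before the last reference is dropped,
 * so only the memory is freed here, after a grace period, which is what
 * keeps concurrent lookups safe while they walk the bucket.
 */
static void my_entry_release(struct kref *ref)
{
	struct my_entry *e = container_of(ref, struct my_entry, refcount);

	kfree_rcu(e, rcu);
}

static struct my_entry *my_entry_find(struct hlist_head *head, u32 key)
{
	struct my_entry *e, *found = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, node) {
		if (e->key != key)
			continue;

		/* skip entries whose last reference is already gone */
		if (!kref_get_unless_zero(&e->refcount))
			continue;

		found = e;
		break;
	}
	rcu_read_unlock();

	/* caller drops it with kref_put(&found->refcount, my_entry_release) */
	return found;
}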
Code example #2
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
					       struct drm_file *file_priv,
					       uint32_t id, uint32_t type)
{
	struct drm_mode_object *obj = NULL;

	mutex_lock(&dev->mode_config.idr_mutex);
	obj = idr_find(&dev->mode_config.crtc_idr, id);
	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
		obj = NULL;
	if (obj && obj->id != id)
		obj = NULL;

	if (obj && drm_mode_object_lease_required(obj->type) &&
	    !_drm_lease_held(file_priv, obj->id))
		obj = NULL;

	if (obj && obj->free_cb) {
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	mutex_unlock(&dev->mode_config.idr_mutex);

	return obj;
}
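Callers in kernels of this vintage normally go through the public wrapper drm_mode_object_find(), which takes the same arguments, and balance a successful lookup with drm_mode_object_put(). A hypothetical caller sketch; the helper name and its arguments are illustrative, only the DRM calls themselves are existing API.

static int my_check_crtc(struct drm_device *dev, struct drm_file *file_priv,
			 uint32_t crtc_id)
{
	struct drm_mode_object *mo;

	mo = drm_mode_object_find(dev, file_priv, crtc_id,
				  DRM_MODE_OBJECT_CRTC);
	if (!mo)
		return -ENOENT;

	/* ... inspect the object ... */

	/* drop the lookup reference; this is a no-op for object types
	 * that are not refcounted (no free_cb)
	 */
	drm_mode_object_put(mo);
	return 0;
}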
Code example #3
File: noise.c Project: eworm-de/wireguard
struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
		"Taking noise keypair reference without holding the RCU BH read lock");
	if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
		return NULL;
	return keypair;
}
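A hypothetical caller sketch for the getter above: the RCU-BH read-side section satisfies the lockdep assertion, and wg_noise_keypair_get() hands back either a referenced keypair or NULL. The my_state structure is illustrative, not taken from the WireGuard sources.

struct my_state {
	struct noise_keypair __rcu *current_keypair;
};

static struct noise_keypair *my_state_get_keypair(struct my_state *st)
{
	struct noise_keypair *keypair;

	rcu_read_lock_bh();
	keypair = wg_noise_keypair_get(rcu_dereference_bh(st->current_keypair));
	rcu_read_unlock_bh();

	return keypair;		/* NULL if unset or already being freed */
}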
Code example #4
File: i915_gem_userptr.c Project: bluhm/sys
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end,
						       bool blockable)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		if (!blockable) {
			spin_unlock(&mn->lock);
			return -EAGAIN;
		}
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);

	return 0;
}
Code example #5
File: user.c Project: teg/bus1
static struct bus1_user *
bus1_user_get(struct bus1_domain_info *domain_info, kuid_t uid)
{
	struct bus1_user *user;

	rcu_read_lock();
	user = idr_find(&domain_info->user_idr, __kuid_val(uid));
	if (user && !kref_get_unless_zero(&user->ref))
		/* the user is about to be destroyed, ignore it */
		user = NULL;
	rcu_read_unlock();

	return user;
}
Code example #6
File: ttm_object.c Project: ajinkya93/OpenBSD
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	mtx_enter(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
	}
	mtx_leave(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	return base;
}
Code example #7
File: ttm_object.c Project: BozkurTR/kernel
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *uninitialized_var(base);
	struct drm_hash_item *hash;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
	}
	rcu_read_unlock();

	if (unlikely(ret != 0))
		return NULL;

	return base;
}
Code example #8
File: ttm_object.c Project: CenturyGlorion/linux
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
Code example #9
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Code example #10
/**
 * batadv_choose_next_candidate - select the next DHT candidate
 * @bat_priv: the bat priv with all the soft interface information
 * @cands: candidates array
 * @select: number of candidates already present in the array
 * @ip_key: key to look up in the DHT
 * @last_max: pointer where the address of the selected candidate will be saved
 */
static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
					 struct batadv_dat_candidate *cands,
					 int select, batadv_dat_addr_t ip_key,
					 batadv_dat_addr_t *last_max)
{
	batadv_dat_addr_t max = 0;
	batadv_dat_addr_t tmp_max = 0;
	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	int i;

	/* if no node is eligible as candidate, leave the candidate type as
	 * NOT_FOUND
	 */
	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;

	/* iterate over the originator list and find the node with the closest
	 * dat_address which has not been selected yet
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* the dht space is a ring using unsigned addresses */
			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
				  ip_key;

			if (!batadv_is_orig_node_eligible(cands, select,
							  tmp_max, max,
							  *last_max, orig_node,
							  max_orig_node))
				continue;

			if (!kref_get_unless_zero(&orig_node->refcount))
				continue;

			max = tmp_max;
			if (max_orig_node)
				batadv_orig_node_put(max_orig_node);
			max_orig_node = orig_node;
		}
		rcu_read_unlock();
	}

	if (max_orig_node) {
		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
		cands[select].orig_node = max_orig_node;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
			   select, max_orig_node->orig, max_orig_node->dat_addr,
			   max);
	}

	*last_max = max;
}
Code example #11
File: amdgpu_bo_list.c Project: EMFPGA/linux_media
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}
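The three rcu_read_unlock() call sites above can be collapsed into one without changing behaviour, since the mutex is only taken after the RCU section ends. One possible restructuring, as a sketch rather than the upstream code:

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);
	/* treat an entry whose refcount already dropped to zero as missing */
	if (result && !kref_get_unless_zero(&result->refcount))
		result = NULL;
	rcu_read_unlock();

	if (result)
		mutex_lock(&result->lock);

	return result;
}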
Code example #12
File: bsg-lib.c Project: Anjali05/linux
int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
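bsg_job_get() is a bare wrapper around the primitive itself. For reference, kref_get_unless_zero() is a small inline in include/linux/kref.h; in recent kernels it is roughly the following, while older kernels expressed the same check as atomic_add_unless(&kref->refcount, 1, 0).

static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	/* increment the refcount only if it is not already zero;
	 * returns non-zero on success, 0 if the object is going away
	 */
	return refcount_inc_not_zero(&kref->refcount);
}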
Code example #13
void dummy(void)
{
	if (!kref_get_unless_zero(NULL))
		return;
}
/* Maps free channel with device */
int nvhost_channel_map(struct nvhost_device_data *pdata,
			struct nvhost_channel **channel,
			void *identifier)
{
	struct nvhost_master *host = NULL;
	struct nvhost_channel *ch = NULL;
	int max_channels = 0;
	int index = 0;

	if (!pdata) {
		pr_err("%s: NULL device data\n", __func__);
		return -EINVAL;
	}

	host = nvhost_get_host(pdata->pdev);

	mutex_lock(&host->chlist_mutex);
	max_channels = nvhost_channel_nb_channels(host);

	/* check if the channel is still in use */
	for (index = 0; index < max_channels; index++) {
		ch = host->chlist[index];
		if (ch->identifier == identifier
			&& kref_get_unless_zero(&ch->refcount)) {
			/* yes, client can continue using it */
			*channel = ch;
			mutex_unlock(&host->chlist_mutex);
			return 0;
		}
	}

	do {
		index = find_first_zero_bit(host->allocated_channels,
					    max_channels);
		if (index >= max_channels) {
			mutex_unlock(&host->chlist_mutex);
			mdelay(1);
			mutex_lock(&host->chlist_mutex);
		}
	} while (index >= max_channels);

	/* Reserve the channel */
	set_bit(index, host->allocated_channels);
	ch = host->chlist[index];

	/* Bind the reserved channel to the device */
	ch->dev = pdata->pdev;
	ch->identifier = identifier;
	kref_init(&ch->refcount);

	/* allocate vm */
	ch->vm = nvhost_vm_allocate(pdata->pdev);
	if (!ch->vm)
		goto err_alloc_vm;

	/* Handle logging */
	trace_nvhost_channel_map(pdata->pdev->name, ch->chid,
				 pdata->num_mapped_chs);
	dev_dbg(&ch->dev->dev, "channel %d mapped\n", ch->chid);

	mutex_unlock(&host->chlist_mutex);

	*channel = ch;

	return 0;

err_alloc_vm:
	clear_bit(index, host->allocated_channels);
	ch->dev = NULL;
	ch->identifier = NULL;

	mutex_unlock(&host->chlist_mutex);

	return -ENOMEM;
}
Code example #15
File: user.c Project: teg/bus1
/**
 * bus1_user_acquire_by_uid() - get a user object for a uid in the given domain
 * @domain:		domain of the user
 * @uid:		uid of the user
 *
 * Find and return the user object for the uid if it exists, otherwise create it
 * first. The caller is responsible to release their reference (and all derived
 * references) before the parent domain is deactivated!
 *
 * Return: A user object for the given uid, ERR_PTR on failure.
 */
struct bus1_user *
bus1_user_acquire_by_uid(struct bus1_domain *domain, kuid_t uid)
{
	struct bus1_user *user, *old_user, *new_user;
	int r = 0;

	WARN_ON(!uid_valid(uid));

	lockdep_assert_held(&domain->active);

	/* try to get the user without taking a lock */
	user = bus1_user_get(domain->info, uid);
	if (user)
		return user;

	/* didn't exist, allocate a new one */
	new_user = bus1_user_new(domain->info, uid);
	if (IS_ERR(new_user))
		return new_user;

	/*
	 * Allocate the smallest possible internal id for this user; used in
	 * arrays for accounting user quota in receiver pools.
	 */
	r = ida_simple_get(&domain->info->user_ida, 0, 0, GFP_KERNEL);
	if (r < 0)
		goto exit;

	new_user->id = r;

	mutex_lock(&domain->info->lock);
	/*
	 * Someone else might have raced us outside the lock, so check if the
	 * user still does not exist.
	 */
	old_user = idr_find(&domain->info->user_idr, __kuid_val(uid));
	if (likely(!old_user)) {
		/* user does not exist, link the newly created one */
		r = idr_alloc(&domain->info->user_idr, new_user,
			      __kuid_val(uid), __kuid_val(uid) + 1, GFP_KERNEL);
		if (r < 0)
			goto exit;
	} else {
		/* another allocation raced us, try re-using that one */
		if (likely(kref_get_unless_zero(&old_user->ref))) {
			user = old_user;
			goto exit;
		} else {
			/* the other one is getting destroyed, replace it */
			idr_replace(&domain->info->user_idr, new_user,
				    __kuid_val(uid));
			old_user->uid = INVALID_UID; /* mark old as removed */
		}
	}

	user = new_user;
	new_user = NULL;

exit:
	mutex_unlock(&domain->info->lock);
	bus1_user_release(new_user);
	if (r < 0)
		return ERR_PTR(r);
	return user;
}
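The allocate-outside-the-lock, recheck-under-the-lock shape above shows up wherever an idr maps keys to refcounted objects. A condensed, self-contained sketch of the same idea; my_table, my_obj and their helpers are illustrative names, not part of the bus1 code.

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_table {
	struct mutex lock;	/* protects the idr */
	struct idr idr;		/* key -> struct my_obj, initialised elsewhere */
};

struct my_obj {
	struct kref ref;
	struct my_table *tbl;
	u32 key;
};

static struct my_obj *my_obj_new(struct my_table *tbl, u32 key)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);

	kref_init(&obj->ref);		/* refcount starts at 1 */
	obj->tbl = tbl;
	obj->key = key;
	return obj;
}

/* Last reference dropped: unlink from the idr under the table lock, unless
 * a newer object has already taken over the slot, then free the memory.
 */
static void my_obj_free(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	mutex_lock(&obj->tbl->lock);
	if (idr_find(&obj->tbl->idr, obj->key) == obj)
		idr_remove(&obj->tbl->idr, obj->key);
	mutex_unlock(&obj->tbl->lock);

	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	if (obj)
		kref_put(&obj->ref, my_obj_free);
}

static struct my_obj *my_obj_get_or_create(struct my_table *tbl, u32 key)
{
	struct my_obj *obj, *new_obj;
	int r;

	/* allocate outside the lock, as bus1_user_acquire_by_uid() does */
	new_obj = my_obj_new(tbl, key);
	if (IS_ERR(new_obj))
		return new_obj;

	mutex_lock(&tbl->lock);

	obj = idr_find(&tbl->idr, key);
	if (obj && kref_get_unless_zero(&obj->ref)) {
		/* someone raced us and the existing object is still live */
		mutex_unlock(&tbl->lock);
		my_obj_put(new_obj);
		return obj;
	}

	if (obj) {
		/* an entry exists but is being torn down: take over its slot */
		idr_replace(&tbl->idr, new_obj, key);
	} else {
		r = idr_alloc(&tbl->idr, new_obj, key, key + 1, GFP_KERNEL);
		if (r < 0) {
			mutex_unlock(&tbl->lock);
			my_obj_put(new_obj);
			return ERR_PTR(r);
		}
	}

	mutex_unlock(&tbl->lock);
	return new_obj;
}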
Code example #16
File: xprtmultipath.c Project: AlexShiLucky/linux
/**
 * xprt_switch_get - Return a reference to a rpc_xprt_switch
 * @xps: pointer to struct rpc_xprt_switch
 *
 * Returns a reference to xps unless the refcount is already zero.
 */
struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps)
{
	if (xps != NULL && kref_get_unless_zero(&xps->xps_kref))
		return xps;
	return NULL;
}
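Because xprt_switch_get() tolerates both a NULL argument and a switch whose refcount has already reached zero, a caller can copy a possibly-unset RCU pointer without extra checks. A hypothetical caller sketch; the my_client structure is illustrative, not part of the SUNRPC code, while xprt_switch_get()/xprt_switch_put() are the real get/put pair.

struct my_client {
	struct rpc_xprt_switch __rcu *xps;
};

static struct rpc_xprt_switch *my_client_get_switch(struct my_client *clnt)
{
	struct rpc_xprt_switch *xps;

	rcu_read_lock();
	/* NULL if the pointer is unset or the switch is already dying */
	xps = xprt_switch_get(rcu_dereference(clnt->xps));
	rcu_read_unlock();

	return xps;	/* drop with xprt_switch_put() when done */
}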
Code example #17
File: i915_gem_userptr.c Project: grate-driver/linux
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;

}