static int __init radix_tree_example_init(void)
{
	/*
	 * The tree stores only the pointer, so the buffer must outlive this
	 * init function: make it static rather than stack-allocated.
	 */
	static char a[40] = "hello, radix tree";
	void *item;

	radix_tree_insert(&mytree, 0, (void *)a);
	radix_tree_insert(&mytree, 4, (void *)a);
	radix_tree_insert(&mytree, 131, (void *)a);
	radix_tree_insert(&mytree, 4096, (void *)a);

	item = radix_tree_lookup(&mytree, 1);	/* no entry at index 1: NULL */
	printk(KERN_ALERT "[Hello] radix_tree_example: lookup(1) = %p\n", item);
	return 0;
}
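The snippet above references a tree root, mytree, that it never declares, and it would leak its entries when the module is removed. A minimal sketch of the missing pieces, assuming a standalone module; the exit-function name is illustrative:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/radix-tree.h>

/* Declare and initialise the tree root used by the init function above. */
static RADIX_TREE(mytree, GFP_KERNEL);

static void __exit radix_tree_example_exit(void)
{
	/* Remove the entries inserted at init time; the buffer behind them
	 * is static, so there is nothing else to free. */
	radix_tree_delete(&mytree, 0);
	radix_tree_delete(&mytree, 4);
	radix_tree_delete(&mytree, 131);
	radix_tree_delete(&mytree, 4096);
}

module_init(radix_tree_example_init);
module_exit(radix_tree_example_exit);
MODULE_LICENSE("GPL");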
Example #2
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
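The comment block documents the parameters but not a calling pattern; a hedged sketch of a typical caller follows (the helper name example_read_inode is made up, and XFS_ILOCK_SHARED is just one valid lock flag):

/* Illustrative caller, not from the original source: grab an inode by
 * number with a shared inode lock, inspect it, then unlock and release. */
static int example_read_inode(xfs_mount_t *mp, xfs_ino_t ino)
{
	xfs_inode_t	*ip;
	int		error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... look at the inode core here ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);		/* drop the reference taken by xfs_iget() */
	return 0;
}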
Example #3
/* Allocate memory for the iint associated with the inode
 * from the iint_cache slab, initialize the iint, and
 * insert it into the radix tree.
 *
 * On success return a pointer to the iint; on failure return NULL.
 */
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
    struct ima_iint_cache *iint = NULL;
    int rc = 0;

    if (!ima_initialized)
        return iint;
    iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
    if (!iint)
        return iint;

    rc = radix_tree_preload(GFP_NOFS);
    if (rc < 0)
        goto out;

    spin_lock(&ima_iint_lock);
    rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
    spin_unlock(&ima_iint_lock);
out:
    if (rc < 0) {
        kmem_cache_free(iint_cache, iint);
        if (rc == -EEXIST) {
            spin_lock(&ima_iint_lock);
            iint = radix_tree_lookup(&ima_iint_store,
                                     (unsigned long)inode);
            spin_unlock(&ima_iint_lock);
        } else
            iint = NULL;
    }
    radix_tree_preload_end();
    return iint;
}
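For context, a minimal sketch of the supporting state ima_iint_insert() relies on. The declarations mirror the identifiers used above; the setup function name and the absence of a slab constructor are assumptions:

/* Sketch only: the store, its lock and the slab cache assumed above. */
static RADIX_TREE(ima_iint_store, GFP_ATOMIC);
static DEFINE_SPINLOCK(ima_iint_lock);
static struct kmem_cache *iint_cache;

static int __init example_iint_cache_init(void)
{
    iint_cache = kmem_cache_create("iint_cache",
                                   sizeof(struct ima_iint_cache), 0,
                                   SLAB_PANIC, NULL);
    return 0;
}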
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
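As the kerneldoc says, board code reserves an id and then drives it through the regular lock/unlock API. A hedged usage sketch, with an invented id constant and timeout:

/* Illustrative only: reserve hwspinlock 7, take it with a timeout, touch
 * the shared resource, then release the lock and the reservation. */
#define EXAMPLE_HWLOCK_ID	7
#define EXAMPLE_TIMEOUT_MS	100

static int example_use_specific_hwspinlock(void)
{
	struct hwspinlock *hwlock;
	int ret;

	hwlock = hwspin_lock_request_specific(EXAMPLE_HWLOCK_ID);
	if (!hwlock)
		return -EBUSY;

	ret = hwspin_lock_timeout(hwlock, EXAMPLE_TIMEOUT_MS);
	if (!ret) {
		/* ... access the resource shared with the remote core ... */
		hwspin_unlock(hwlock);
	}

	hwspin_lock_free(hwlock);	/* give the id back */
	return ret;
}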
Example #5
static struct q_irq_data *qpnpint_alloc_irq_data(
					struct q_chip_data *chip_d,
					unsigned long hwirq)
{
	struct q_irq_data *irq_d;
	struct q_perip_data *per_d;

	irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
	if (!irq_d)
		return ERR_PTR(-ENOMEM);

	/**
	 * The Peripheral Tree is keyed from the slave + per_id. We're
	 * ignoring the irq bits here since this peripheral structure
	 * should be common for all irqs on the same peripheral.
	 */
	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
	if (!per_d) {
		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
		if (!per_d) {
			kfree(irq_d);
			return ERR_PTR(-ENOMEM);
		}
		radix_tree_insert(&chip_d->per_tree,
				  (hwirq & ~0x7), per_d);
	}
	irq_d->per_d = per_d;

	return irq_d;
}
Example #6
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
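The kerneldoc above requires a positive css refcount on entry; an illustrative caller sketch showing the expected get/use/put pairing (the helper name is made up, and GFP_ATOMIC is just one valid mask):

/* Illustrative caller: pin the memcg css, get (or create) its wb on the
 * bdi, use it, then drop both references. */
static void example_touch_wb(struct backing_dev_info *bdi,
			     struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!css_tryget_online(memcg_css))
		return;

	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	if (wb) {
		/* ... queue writeback work against wb ... */
		wb_put(wb);
	}

	css_put(memcg_css);
}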
void uvm_pmm_sysmem_mappings_reparent_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                                  NvU64 dma_addr,
                                                  uvm_va_block_t *va_block)
{
    NvU64 virt_addr;
    uvm_reverse_map_t *reverse_map;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    uvm_page_index_t new_start_page;

    UVM_ASSERT(PAGE_ALIGNED(dma_addr));
    UVM_ASSERT(va_block);
    UVM_ASSERT(va_block->va_range);

    if (!sysmem_mappings->gpu->access_counters_supported)
        return;

    uvm_spin_lock(&sysmem_mappings->reverse_map_lock);

    reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, base_key);
    UVM_ASSERT(reverse_map);

    // Compute virt address by hand since the old VA block may be messed up
    // during split
    virt_addr = reverse_map->va_block->start + reverse_map->region.first * PAGE_SIZE;
    new_start_page = uvm_va_block_cpu_page_index(va_block, virt_addr);

    reverse_map->region   = uvm_va_block_region(new_start_page,
                                                new_start_page + uvm_va_block_region_num_pages(reverse_map->region));
    reverse_map->va_block = va_block;

    UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_start(reverse_map)));
    UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_end(reverse_map)));

    uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);
}
Example #8
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	struct mlx4_mac_entry *entry;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			index = find_index(dev, table, entry->mac);
			kfree(entry);
		}
	}

	mutex_lock(&table->mutex);

	if (validate_index(dev, table, index))
		goto out;

	/* Check whether this address has reference count */
	if (!(--table->refs[index])) {
		table->entries[index] = 0;
		mlx4_set_port_mac_table(dev, port, table->entries);
		--table->total;
	}
out:
	mutex_unlock(&table->mutex);
}
Example #9
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	struct mlx4_mac_entry *entry;

	if (dev->caps.vep_uc_steering) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			index = find_index(dev, table, entry->mac);
			kfree(entry);
		}
	}

	mutex_lock(&table->mutex);

	if (validate_index(dev, table, index))
		goto out;

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
Example #10
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}
Example #11
/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
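Because a hit comes back locked and referenced, the calling pattern pairs it with xfs_qm_dqput(); a sketch, illustrative rather than lifted from the source:

/* Illustrative caller: on a hit the dquot is locked and referenced, and is
 * eventually released with xfs_qm_dqput(), which also unlocks it. */
static int example_peek_dquot(struct xfs_mount *mp, struct xfs_quotainfo *qi,
			      struct radix_tree_root *tree, xfs_dqid_t id)
{
	struct xfs_dquot	*dqp;

	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (!dqp)
		return -ENOENT;		/* miss: caller would read it from disk */

	/* ... inspect the dquot while holding its lock ... */

	xfs_qm_dqput(dqp);
	return 0;
}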
Example #12
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
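Because the lookup requires @q->queue_lock, a caller typically wraps it like this (illustrative sketch, not from the source):

/* Illustrative caller: the queue lock must be held across the lookup. */
static struct io_cq *example_find_icq(struct io_context *ioc,
				      struct request_queue *q)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	return icq;
}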
Example #13
// ARM10C 20141122
// irq: 16
// ARM10C 20150321
// irq: 347
// ARM10C 20150328
// irq: 152
// ARM10C 20150509
// irq: 152
// ARM10C 20150516
// irq: 152
// ARM10C 20150523
// irq: 347
struct irq_desc *irq_to_desc(unsigned int irq)
{
	// irq: 16, radix_tree_lookup(&irq_desc_tree, 16): kmem_cache#28-oX (irq 16)
	// irq: 347, radix_tree_lookup(&irq_desc_tree, 347): kmem_cache#28-oX (irq 347)
	return radix_tree_lookup(&irq_desc_tree, irq);
	// return kmem_cache#28-oX (irq 16)
	// return kmem_cache#28-oX (irq 347)
}
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
Example #15
int si_pid_test_slow(struct super_block *sb)
{
	void *p;

	rcu_read_lock();
	p = radix_tree_lookup(&au_sbi(sb)->au_si_pid.tree, current->pid);
	rcu_read_unlock();

	return (long)!!p;
}
Example #16
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
Example #17
/*
 * sbd_free_diskmem() - free the memory allocated for the disk
 *
 * Removes every page-sized chunk from the radix tree and frees the page.
 */
void sbd_free_diskmem(void)
{
	int i;
	void *p;

	for (i = 0; i < (SBD_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
		p = radix_tree_lookup(&sbd_data, i);
		radix_tree_delete(&sbd_data, i);
		free_page((unsigned long)p);
	}
}
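For context, a hedged sketch of the allocation path this teardown reverses; the function name sbd_alloc_diskmem and the use of get_zeroed_page() are assumptions, not taken from the original driver:

/* Sketch: allocate one page per PAGE_SIZE chunk of the disk and index it
 * by chunk number, undoing everything via sbd_free_diskmem() on failure. */
static int sbd_alloc_diskmem(void)
{
	int i, rc;
	void *p;

	for (i = 0; i < (SBD_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
		p = (void *)get_zeroed_page(GFP_KERNEL);
		if (!p)
			goto err;
		rc = radix_tree_insert(&sbd_data, i, p);
		if (rc) {
			free_page((unsigned long)p);
			goto err;
		}
	}
	return 0;
err:
	sbd_free_diskmem();
	return -ENOMEM;
}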
Example #18
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
{
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	struct mlx5e_vxlan *vxlan;

	spin_lock(&vxlan_db->lock);
	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
	spin_unlock(&vxlan_db->lock);

	return vxlan;
}
Example #19
File: srq.c  Project: mdamt/linux
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	rcu_read_lock();
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	rcu_read_unlock();

	return srq;
}
size_t uvm_pmm_sysmem_mappings_dma_to_virt(uvm_pmm_sysmem_mappings_t *sysmem_mappings,
                                           NvU64 dma_addr,
                                           NvU64 region_size,
                                           uvm_reverse_map_t *out_mappings,
                                           size_t max_out_mappings)
{
    NvU64 key;
    size_t num_mappings = 0;
    const NvU64 base_key = dma_addr / PAGE_SIZE;
    NvU32 num_pages = region_size / PAGE_SIZE;

    UVM_ASSERT(region_size >= PAGE_SIZE);
    UVM_ASSERT(PAGE_ALIGNED(region_size));
    UVM_ASSERT(sysmem_mappings->gpu->access_counters_supported);
    UVM_ASSERT(max_out_mappings > 0);

    uvm_spin_lock(&sysmem_mappings->reverse_map_lock);

    key = base_key;
    do {
        uvm_reverse_map_t *reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, key);

        if (reverse_map) {
            size_t num_chunk_pages = uvm_va_block_region_num_pages(reverse_map->region);
            NvU32 page_offset = key & (num_chunk_pages - 1);
            NvU32 num_mapping_pages = min(num_pages, (NvU32)num_chunk_pages - page_offset);

            // Sysmem mappings are removed during VA block destruction.
            // Therefore, we can safely retain the VA blocks as long as they
            // are in the reverse map and we hold the reverse map lock.
            uvm_va_block_retain(reverse_map->va_block);
            out_mappings[num_mappings]               = *reverse_map;
            out_mappings[num_mappings].region.first += page_offset;
            out_mappings[num_mappings].region.outer  = out_mappings[num_mappings].region.first + num_mapping_pages;

            if (++num_mappings == max_out_mappings)
                break;

            num_pages -= num_mapping_pages;
            key       += num_mapping_pages;
        }
        else {
            --num_pages;
            ++key;
        }
    }
    while (num_pages > 0);

    uvm_spin_unlock(&sysmem_mappings->reverse_map_lock);

    return num_mappings;
}
Example #21
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&srq_table->lock, flags);
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	spin_unlock_irqrestore(&srq_table->lock, flags);

	return srq;
}
Example #22
/* ima_iint_find_get - return the iint associated with an inode
 *
 * ima_iint_find_get gets a reference to the iint. Caller must
 * remember to put the iint reference.
 */
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
    struct ima_iint_cache *iint;

    rcu_read_lock();
    iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
    if (!iint)
        goto out;
    kref_get(&iint->refcount);
out:
    rcu_read_unlock();
    return iint;
}
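Since the comment obliges the caller to put the reference, here is a hedged usage sketch; the release callback name ima_iint_release is hypothetical, so substitute whatever the kref owner actually registers:

/* Illustrative caller: look up the iint, use it, drop the reference. */
static void example_use_iint(struct inode *inode)
{
    struct ima_iint_cache *iint;

    iint = ima_iint_find_get(inode);
    if (!iint)
        return;

    /* ... read or update iint state under its own locking ... */

    kref_put(&iint->refcount, ima_iint_release); /* hypothetical release fn */
}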
Example #23
struct mpls_ilm* 
mpls_get_ilm (unsigned int key) 
{
	struct mpls_ilm *ilm = NULL;

	rcu_read_lock();
	ilm = radix_tree_lookup(&mpls_ilm_tree, key);
	smp_read_barrier_depends();
	if (likely(ilm))
		mpls_ilm_hold(ilm);

	rcu_read_unlock();
	return ilm;
}
Example #24
File: dir.c  Project: NVSL/NOVA
struct nova_dentry *nova_find_dentry(struct super_block *sb,
	struct nova_inode *pi, struct inode *inode, const char *name,
	unsigned long name_len)
{
	struct nova_inode_info *si = NOVA_I(inode);
	struct nova_inode_info_header *sih = &si->header;
	struct nova_dentry *direntry;
	unsigned long hash;

	hash = BKDRHash(name, name_len);
	direntry = radix_tree_lookup(&sih->tree, hash);

	return direntry;
}
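The lookup keys the tree with BKDRHash(); for reference, a sketch of the common BKDR string hash (seed 131) this presumably is, though NOVA's exact implementation may differ:

/* Sketch of a classic BKDR string hash; shown only to make the key
 * derivation above concrete. */
static unsigned long BKDRHash(const char *str, int length)
{
	unsigned int seed = 131;	/* 31, 131, 1313, ... */
	unsigned long hash = 0;
	int i;

	for (i = 0; i < length; i++)
		hash = hash * seed + (unsigned char)str[i];

	return hash;
}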
Example #25
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	return srq;
}
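The matching put side drops the reference and, for the last holder, wakes whoever is tearing the SRQ down. A short illustrative sketch; the completion field name srq->free follows the usual mlx4/mlx5 pattern and is an assumption here:

/* Illustrative put side for every successful mlx5_core_get_srq(). */
static void example_put_srq(struct mlx5_core_srq *srq)
{
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}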
Example #26
struct mpls_nhlfe*  
mpls_get_nhlfe (unsigned int key) 
{
	struct mpls_nhlfe *nhlfe = NULL;

	rcu_read_lock();
	nhlfe = radix_tree_lookup(&mpls_nhlfe_tree, key);
	smp_read_barrier_depends();
	if (likely(nhlfe)) {
		mpls_nhlfe_hold(nhlfe);
	}
	rcu_read_unlock();

	return nhlfe;
}
Example #27
File: cq.c  Project: 274914765/C
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
    struct mlx4_cq *cq;

    cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                   cqn & (dev->caps.num_cqs - 1));
    if (!cq) {
        mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
        return;
    }

    ++cq->arm_sn;

    cq->comp(cq);
}
Example #28
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	++cq->arm_sn;
	cq->comp(cq);
}
Example #29
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
Example #30
#include <stdio.h>

/*
 * Assumes a user-space radix tree library providing radix_tree_head,
 * radix_tree_head_new(), radix_tree_initial(), radix_tree_insert(),
 * radix_tree_gang_lookup(), radix_tree_lookup() and radix_tree_delete().
 */
int main(void)
{
    char *test[] = {"abc", "def", "ghi", "jkl", "mno", "pqr", "stu", "vwx", "yz0",
                    "123", "456", "789", "zyx", "wvu", "tsr", "qpo", "nml", "kji"};

    int i = 0;
    int num = sizeof(test)/sizeof(*test);

    printf("num:%d\n", num);

    radix_tree_head *head = radix_tree_head_new();
    if (!head) {
        printf("alloc head failed\n");
        return 1;
    }
    radix_tree_initial(head);

    /* Insert the test data. */
    for (i = 0; i < num; i++)
        radix_tree_insert(head, i, test[i]);

    /* Gang lookup: fetch up to max_items (5) entries starting at index 2. */
    void *result[5];

    radix_tree_gang_lookup(head, result, 2, 5);

    /* Only 5 slots were requested and filled, so print exactly 5. */
    for (i = 0; i < 5; i++)
        printf("%d :%s\n", i, (char *)result[i]);

    /* Look up each entry individually by its index. */
    for (i = 0; i < num; i++)
        printf("%s\n", (char *)radix_tree_lookup(head, i));

    for (i = 0; i < num; i++)
        radix_tree_delete(head, i);

    return 0;
}