Example #1
static int __init radix_tree_example_init(void)
{
	/* static storage: the tree keeps pointing at this buffer after init */
	static char a[40] = "hello, radix tree";

	/* note: the insert return values (0, -EEXIST or -ENOMEM) are ignored */
	radix_tree_insert(&mytree, 0, (void *)a);
	radix_tree_insert(&mytree, 4, (void *)a);
	radix_tree_insert(&mytree, 131, (void *)a);
	radix_tree_insert(&mytree, 4096, (void *)a);

	radix_tree_lookup(&mytree, 1);
	printk(KERN_ALERT "[Hello] radix_tree_example\n");
	return 0;
}
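Example #1 never shows the declaration of mytree and discards every return value. A minimal, self-contained sketch of the same pattern with the declaration and error handling filled in; the module and payload names are placeholders, while RADIX_TREE(), the 0/-EEXIST/-ENOMEM return convention of radix_tree_insert(), and radix_tree_delete() are the stock kernel API.

#include <linux/module.h>
#include <linux/radix-tree.h>

static RADIX_TREE(mytree, GFP_KERNEL);	/* statically initialized root */
static char payload[] = "hello, radix tree";

static int __init radix_tree_checked_init(void)
{
	/* radix_tree_insert() returns 0, -EEXIST or -ENOMEM */
	int err = radix_tree_insert(&mytree, 0, payload);

	if (err)
		return err;

	/* lookup returns the stored pointer, or NULL for an empty index */
	if (!radix_tree_lookup(&mytree, 0))
		pr_warn("index 0 unexpectedly empty\n");

	return 0;
}

static void __exit radix_tree_checked_exit(void)
{
	radix_tree_delete(&mytree, 0);	/* empty the tree before unload */
}

module_init(radix_tree_checked_init);
module_exit(radix_tree_checked_exit);
MODULE_LICENSE("GPL");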
Example #2
static void *regression1_fn(void *arg)
{
	rcu_register_thread();

	if (pthread_barrier_wait(&worker_barrier) ==
			PTHREAD_BARRIER_SERIAL_THREAD) {
		int j;

		for (j = 0; j < 1000000; j++) {
			struct page *p;

			p = page_alloc();
			pthread_mutex_lock(&mt_lock);
			radix_tree_insert(&mt_tree, 0, p);
			pthread_mutex_unlock(&mt_lock);

			p = page_alloc();
			pthread_mutex_lock(&mt_lock);
			radix_tree_insert(&mt_tree, 1, p);
			pthread_mutex_unlock(&mt_lock);

			pthread_mutex_lock(&mt_lock);
			p = radix_tree_delete(&mt_tree, 1);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			pthread_mutex_unlock(&mt_lock);
			page_free(p);

			pthread_mutex_lock(&mt_lock);
			p = radix_tree_delete(&mt_tree, 0);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			pthread_mutex_unlock(&mt_lock);
			page_free(p);
		}
	} else {
		int j;

		for (j = 0; j < 100000000; j++) {
			struct page *pages[10];

			find_get_pages(0, 10, pages);
		}
	}

	rcu_unregister_thread();

	return NULL;
}
Example #3
File: p2m.c Project: Fantu/Xen
static int p2m_mem_access_radix_set(struct p2m_domain *p2m, unsigned long pfn,
                                    p2m_access_t a)
{
    int rc;

    if ( !p2m->mem_access_enabled )
        return 0;

    if ( p2m_access_rwx == a )
    {
        radix_tree_delete(&p2m->mem_access_settings, pfn);
        return 0;
    }

    rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
                           radix_tree_int_to_ptr(a));
    if ( rc == -EEXIST )
    {
        /* If a setting already exists, change it to the new one */
        radix_tree_replace_slot(
            radix_tree_lookup_slot(
                &p2m->mem_access_settings, pfn),
            radix_tree_int_to_ptr(a));
        rc = 0;
    }

    return rc;
}
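Because the tree stores pointers, Xen boxes the small p2m_access_t value with radix_tree_int_to_ptr() before inserting it. Below is a hedged sketch of the matching read side using Xen's inverse helper radix_tree_ptr_to_int(); the function name is hypothetical.

/* hypothetical lookup counterpart to p2m_mem_access_radix_set() */
static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m,
                                             unsigned long pfn)
{
    void *v = radix_tree_lookup(&p2m->mem_access_settings, pfn);

    /* an absent entry means the default: full rwx access */
    return v ? radix_tree_ptr_to_int(v) : p2m_access_rwx;
}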
Example #4
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}
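This and several later examples (#11, #14, #17, #18, #20, #24) rely on the same idiom: radix_tree_preload() reserves tree nodes with a sleeping allocation (and disables preemption on success) before a spinlock is taken, so the insert itself cannot fail with -ENOMEM; radix_tree_preload_end() then re-enables preemption. Stripped of the surrounding subsystem, a sketch of the skeleton, with all names as placeholders:

/* generic preload skeleton; tree, lock, index and item are placeholders */
static int insert_under_spinlock(struct radix_tree_root *tree,
				 spinlock_t *lock,
				 unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep */
	if (err)
		return err;

	spin_lock(lock);
	/* preloaded: only -EEXIST is possible here, not -ENOMEM */
	err = radix_tree_insert(tree, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();		/* re-enable preemption */
	return err;
}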
Example #5
/** alloc memory for disk using page allocator
 * @param none
 * @ret int 0 for success, non-zero for failure
 */
int sbd_alloc_diskmem(void)
{
	int ret;
	int i;
	void *p;

	INIT_RADIX_TREE(&sbd_data, GFP_KERNEL);

	for (i = 0; i < (SBD_BYTES + PAGE_SIZE - 1) >> PAGE_SHIFT; i++) {
		p = (void *)__get_free_page(GFP_KERNEL);
		if (!p) {
			ret = -ENOMEM;
			goto err_alloc;
		}

		/* radix_tree_insert() returns 0 or a negative errno,
		 * never an ERR_PTR value, so test it directly */
		ret = radix_tree_insert(&sbd_data, i, p);
		if (ret)
			goto err_radix_tree_insert;
	}
	return 0;

err_radix_tree_insert:
	free_page((unsigned long)p);
err_alloc:
	sbd_free_diskmem();

	return ret;
}
Example #6
static void mlx5e_vxlan_add_port(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	u16 port = vxlan_work->port;
	struct mlx5e_vxlan *vxlan;
	int err;

	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
		goto free_work;

	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
	if (!vxlan)
		goto err_delete_port;

	vxlan->udp_port = port;

	spin_lock_irq(&vxlan_db->lock);
	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
	spin_unlock_irq(&vxlan_db->lock);
	if (err)
		goto err_free;

	goto free_work;

err_free:
	kfree(vxlan);
err_delete_port:
	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
free_work:
	kfree(vxlan_work);
}
Example #7
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}
Example #8
/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}
Example #9
static struct q_irq_data *qpnpint_alloc_irq_data(
					struct q_chip_data *chip_d,
					unsigned long hwirq)
{
	struct q_irq_data *irq_d;
	struct q_perip_data *per_d;

	irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL);
	if (!irq_d)
		return ERR_PTR(-ENOMEM);

	/*
	 * The Peripheral Tree is keyed from the slave + per_id. We're
	 * ignoring the irq bits here since this peripheral structure
	 * should be common for all irqs on the same peripheral.
	 */
	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
	if (!per_d) {
		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
		if (!per_d) {
			kfree(irq_d);
			return ERR_PTR(-ENOMEM);
		}
		radix_tree_insert(&chip_d->per_tree,
				  (hwirq & ~0x7), per_d);
	}
	irq_d->per_d = per_d;

	return irq_d;
}
Example #10
int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
{
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	struct mlx5e_vxlan *vxlan;
	int err;

	err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
	if (err)
		return err;

	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
	if (!vxlan) {
		err = -ENOMEM;
		goto err_delete_port;
	}

	vxlan->udp_port = port;

	spin_lock_irq(&vxlan_db->lock);
	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
	spin_unlock_irq(&vxlan_db->lock);
	if (err)
		goto err_free;

	return 0;

err_free:
	kfree(vxlan);
err_delete_port:
	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
	return err;
}
Example #11
/**
 * bfq_cic_link - add @cic to @ioc.
 * @bfqd: bfq_data @cic refers to.
 * @ioc: io_context @cic belongs to.
 * @cic: the cic to link.
 * @gfp_mask: the mask to use for radix tree preallocations.
 *
 * Add @cic to @ioc, using @bfqd as the search key.  This enables us to
 * lookup the process specific cfq io context when entered from the block
 * layer.  Also adds @cic to a per-bfqd list, used when this queue is
 * removed.
 */
static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (ret == 0) {
		cic->ioc = ioc;

		/* No write-side locking, cic is not published yet. */
		rcu_assign_pointer(cic->key, bfqd);

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->bfq_radix_root,
					bfqd->cic_index, cic);
		if (ret == 0)
			hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (ret == 0) {
			spin_lock_irqsave(bfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &bfqd->cic_list);
			spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
		}
	}

	if (ret != 0)
		printk(KERN_ERR "bfq: cic link failed!\n");

	return ret;
}
Example #12
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				put_page(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
Example #13
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	memset(srq_context, 0, sizeof *srq_context);

	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride          = srq->wqe_shift - 4;
	srq_context->xrcd		= cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn	= cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h    = mtt_addr >> 32;
	srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd			= cpu_to_be32(pdn);
	srq_context->db_rec_addr        = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
Example #14
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_preload(GFP_NOFS);
	if (error != 0)
		goto out;

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error);
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	radix_tree_preload_end();
out:
	return error;
}
Example #15
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context that has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so it never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
Example #16
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	if (in->type == IB_SRQT_XRC)
		srq->common.res = MLX5_RES_XSRQ;
	else
		srq->common.res = MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
Example #17
/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 */
int ima_inode_alloc(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;
	int rc = 0;

	if (!ima_enabled)
		return 0;

	iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
	if (!iint)
		return -ENOMEM;

	rc = radix_tree_preload(GFP_NOFS);
	if (rc < 0)
		goto out;

	spin_lock(&ima_iint_lock);
	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
	spin_unlock(&ima_iint_lock);
	radix_tree_preload_end();
out:
	if (rc < 0)
		kmem_cache_free(iint_cache, iint);

	return rc;
}
Example #18
/* Allocate memory for the iint associated with the inode
 * from the iint_cache slab, initialize the iint, and
 * insert it into the radix tree.
 *
 * On success return a pointer to the iint; on failure return NULL.
 */
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
    struct ima_iint_cache *iint = NULL;
    int rc = 0;

    if (!ima_initialized)
        return iint;
    iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
    if (!iint)
        return iint;

    rc = radix_tree_preload(GFP_NOFS);
    if (rc < 0)
        goto out;

    spin_lock(&ima_iint_lock);
    rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
    spin_unlock(&ima_iint_lock);
    /* pair preload_end() only with a successful preload; a failed
     * preload leaves preemption enabled */
    radix_tree_preload_end();
out:
    if (rc < 0) {
        kmem_cache_free(iint_cache, iint);
        if (rc == -EEXIST) {
            spin_lock(&ima_iint_lock);
            iint = radix_tree_lookup(&ima_iint_store,
                                     (unsigned long)inode);
            spin_unlock(&ima_iint_lock);
        } else
            iint = NULL;
    }
    return iint;
}
Example #19
/**
 * hwspin_lock_register() - register a new hw spinlock
 * @hwlock: hwspinlock to register.
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock instance.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock || !hwlock->ops ||
		!hwlock->ops->trylock || !hwlock->ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	spin_lock_init(&hwlock->lock);

	spin_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
	if (ret)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	spin_unlock(&hwspinlock_tree_lock);
	return ret;
}
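The HWSPINLOCK_UNUSED tag set above is what lets the request path find a free lock later. A sketch of that tagged lookup, loosely based on hwspin_lock_request() from the same core; the helper name is hypothetical.

/* sketch: grab the first hwspinlock still tagged as unused */
static struct hwspinlock *find_unused_hwspinlock(void)
{
	struct hwspinlock *hwlock;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		hwlock = NULL;			/* none available */
	} else {
		/* mark it used so concurrent requests skip it */
		radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
				     HWSPINLOCK_UNUSED);
	}

	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}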
Example #20
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		set_page_no_new_refs(page);
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		end_page_no_new_refs(page);
		radix_tree_preload_end();
	}
	return error;
}
Example #21
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 blocknr, u64 num_blocks, int pin)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	int ret;
	struct btrfs_extent_item *ei;
	struct btrfs_key ins;
	u32 refs;

	BUG_ON(pin && num_blocks != 1);
	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;

	find_free_extent(trans, root, 0, 0, (u64)-1, &ins);
	btrfs_init_path(&path);
	ret = btrfs_search_slot(trans, extent_root, &key, &path, -1, 1);
	if (ret) {
		btrfs_print_tree(extent_root, extent_root->node);
		printf("failed to find %llu\n",
		       (u64)key.objectid);
		BUG();
	}
	ei = btrfs_item_ptr(&path.nodes[0]->leaf, path.slots[0],
			    struct btrfs_extent_item);
	BUG_ON(ei->refs == 0);
	refs = btrfs_extent_refs(ei) - 1;
	btrfs_set_extent_refs(ei, refs);
	if (refs == 0) {
		u64 super_blocks_used;
		if (pin) {
			int err;
			unsigned long bl = blocknr;
			radix_tree_preload(GFP_KERNEL);
			err = radix_tree_insert(&info->pinned_radix,
						blocknr, (void *)bl);
			BUG_ON(err);
			radix_tree_preload_end();
		}
		super_blocks_used = btrfs_super_blocks_used(info->disk_super);
		btrfs_set_super_blocks_used(info->disk_super,
					    super_blocks_used - num_blocks);
		ret = btrfs_del_item(trans, extent_root, &path);
		if (!pin && extent_root->fs_info->last_insert.objectid >
		    blocknr)
			extent_root->fs_info->last_insert.objectid = blocknr;
		if (ret)
			BUG();
		ret = update_block_group(trans, root, blocknr, num_blocks, 0);
		BUG_ON(ret);
	}
	btrfs_release_path(extent_root, &path);
	finish_current_insert(trans, extent_root);
	return ret;
}
Example #22
void regression2_test(void)
{
	int i;
	struct page *p;
	int max_slots = RADIX_TREE_MAP_SIZE;
	unsigned long int start, end;
	struct page *pages[1];

	printf("running regression test 2 (should take milliseconds)\n");
	/* 0. */
	for (i = 0; i <= max_slots - 1; i++) {
		p = page_alloc();
		radix_tree_insert(&mt_tree, i, p);
	}
	radix_tree_tag_set(&mt_tree, max_slots - 1, PAGECACHE_TAG_DIRTY);

	/* 1. */
	start = 0;
	end = max_slots - 2;
	radix_tree_range_tag_if_tagged(&mt_tree, &start, end, 1,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);

	/* 2. */
	p = page_alloc();
	radix_tree_insert(&mt_tree, max_slots, p);

	/* 3. */
	radix_tree_tag_clear(&mt_tree, max_slots - 1, PAGECACHE_TAG_DIRTY);

	/* 4. */
	for (i = max_slots - 1; i >= 0; i--)
		radix_tree_delete(&mt_tree, i);

	/* 5. */
	// NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot
	//       can return.
	start = 1;
	end = max_slots - 2;
	radix_tree_gang_lookup_tag_slot(&mt_tree, (void ***)pages, start, end,
		PAGECACHE_TAG_TOWRITE);

	/* Remove the remaining node */
	radix_tree_delete(&mt_tree, max_slots);

	printf("regression test 2, done\n");
}
Example #23
int add_cu_mapping(unsigned long addr, struct compilation_unit *cu)
{
	int result;

	pthread_rwlock_wrlock(&cu_map_rwlock);
	result = radix_tree_insert(cu_map, addr, cu);
	pthread_rwlock_unlock(&cu_map_rwlock);

	return result;
}
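This user-space port of the radix tree (from the Jato VM) is serialized with a pthread rwlock, so lookups can proceed concurrently under the read lock. A hypothetical read-side counterpart, mirroring the insert above:

/* hypothetical lookup counterpart to add_cu_mapping() */
struct compilation_unit *lookup_cu_mapping(unsigned long addr)
{
	struct compilation_unit *cu;

	pthread_rwlock_rdlock(&cu_map_rwlock);
	cu = radix_tree_lookup(cu_map, addr);
	pthread_rwlock_unlock(&cu_map_rwlock);

	return cu;
}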
Example #24
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
Example #25
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                          struct mlx5_core_mkey *mkey,
                          struct mlx5_create_mkey_mbox_in *in, int inlen,
                          mlx5_cmd_cbk_t callback, void *context,
                          struct mlx5_create_mkey_mbox_out *out)
{
    struct mlx5_mkey_table *table = &dev->priv.mkey_table;
    struct mlx5_create_mkey_mbox_out lout;
    int err;
    u8 key;

    memset(&lout, 0, sizeof(lout));
    spin_lock_irq(&dev->priv.mkey_lock);
    key = dev->priv.mkey_key++;
    spin_unlock_irq(&dev->priv.mkey_lock);
    in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
    in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
    if (callback) {
        err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
                               callback, context);
        return err;
    } else {
        err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
    }

    if (err) {
        mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
        return err;
    }

    if (lout.hdr.status) {
        mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
        return mlx5_cmd_status_to_err(&lout.hdr);
    }

    mkey->iova = be64_to_cpu(in->seg.start_addr);
    mkey->size = be64_to_cpu(in->seg.len);
    mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
    mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;

    mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
                  be32_to_cpu(lout.mkey), key, mkey->key);

    /* connect to mkey tree */
    write_lock_irq(&table->lock);
    err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
    write_unlock_irq(&table->lock);
    if (err) {
        mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
                       mlx5_base_mkey(mkey->key), err);
        mlx5_core_destroy_mkey(dev, mkey);
    }

    return err;
}
Example #26
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 struct mlx5_create_dct_mbox_in *in)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_create_dct_mbox_out out;
	struct mlx5_destroy_dct_mbox_in din;
	struct mlx5_destroy_dct_mbox_out dout;
	int err;

	init_completion(&dct->drained);
	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d", err);
		return err;
	}

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(dev, &out.hdr);

	dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;

	dct->common.res = MLX5_RES_DCT;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, dct->dctn, dct);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d", err);
		goto err_cmd;
	}

	err = mlx5_debug_dct_add(dev, dct);
	if (err)
		mlx5_core_dbg(dev, "failed adding DCT 0x%x to debug file system\n",
			      dct->dctn);

	dct->pid = current->pid;
	atomic_set(&dct->common.refcount, 1);
	init_completion(&dct->common.free);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
	din.dctn = cpu_to_be32(dct->dctn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

	return err;
}
Example #27
// ARM10C 20141004
// i: 0, desc: kmem_cache#28-o0
// ARM10C 20141004
// i: 1, desc: kmem_cache#28-o1
// ARM10C 20141115
// 16, desc: kmem_cache#28-oX (irq 16)
// ARM10C 20141115
// 64, desc: kmem_cache#28-oX (irq 64)
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	// irq: 0, desc: kmem_cache#28-o0
	// irq: 1, desc: kmem_cache#28-o1
	// irq: 16, desc: kmem_cache#28-oX (irq 16)
	// irq: 64, desc: kmem_cache#28-oX (irq 64)
	radix_tree_insert(&irq_desc_tree, irq, desc);
	// adds kmem_cache#28-o0 to the radix tree as a node
	// adds kmem_cache#28-o1 to the radix tree as a node
	// adds kmem_cache#28-oX to the radix tree as a node
}
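The reverse mapping is the familiar irq_to_desc(); with SPARSE_IRQ enabled it is essentially nothing more than a lookup in the same tree:

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}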
Example #28
static int _add_to_page_cache(struct page *page, struct address_space *mapping, unsigned int offset)
{
    get_page(page);

    if (radix_tree_insert(&mapping->page_tree, offset, page) < 0) {
        put_page(page);     /* drop the reference taken above */
        return -1;
    }

    __add_to_page_cache(page, mapping, offset);

    return 0;
}
Example #29
File: mr.c Project: 168519/linux
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
Example #30
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path path;
	int ret;
	int err = 0;
	struct btrfs_block_group_item *bi;
	struct btrfs_block_group_cache *cache;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_leaf *leaf;
	u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->blocksize;

	root = root->fs_info->extent_root;
	key.objectid = 0;
	key.offset = group_size_blocks;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	btrfs_init_path(&path);

	while (1) {
		ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
					&key, &path, 0, 0);
		if (ret != 0) {
			err = ret;
			break;
		}
		leaf = &path.nodes[0]->leaf;
		btrfs_disk_key_to_cpu(&found_key,
				      &leaf->items[path.slots[0]].key);
		cache = malloc(sizeof(*cache));
		if (!cache) {
			err = -1;
			break;
		}
		bi = btrfs_item_ptr(leaf, path.slots[0],
				    struct btrfs_block_group_item);
		memcpy(&cache->item, bi, sizeof(*bi));
		memcpy(&cache->key, &found_key, sizeof(found_key));
		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, &path);
		ret = radix_tree_insert(&root->fs_info->block_group_radix,
					found_key.objectid +
					found_key.offset - 1, (void *)cache);
		BUG_ON(ret);
		if (key.objectid >=
		    btrfs_super_total_blocks(root->fs_info->disk_super))
			break;
	}
	btrfs_release_path(root, &path);
	return 0;
}