/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}
Example #2
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SWAPCACHE);
	INC_CACHE_INFO(del_total);
}
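
Both variants above assume the caller already holds the page lock and the swap cache's tree lock. For context, here is a minimal sketch of the locked caller-side wrapper, modeled on the kernel's delete_from_swap_cache() from the same era as these snippets (the exact lock primitive is an assumption; very old trees used write_lock_irq() on an rwlock):

/*
 * Sketch: remove a page from the swap cache under the tree lock,
 * then release the swap entry and the swap cache's page reference.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}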
Example #3
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
			page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
Example #4
#include <stdio.h>

#include "radix_tree.h"	/* user-space radix tree implementation (header name assumed) */

int main(void)
{
    char *test[] = {"abc", "def", "ghi", "jkl", "mno", "pqr", "stu", "vwx", "yz0",
                    "123", "456", "789", "zyx", "wvu", "tsr", "qpo", "nml", "kji"};

    int i = 0;
    int num = sizeof(test)/sizeof(*test);

    printf("num:%d\n", num);

    radix_tree_head *head = radix_tree_head_new();
    if (!head)
    {
        printf("alloc head failed\n");
        return -1;
    }

    radix_tree_initial(head);

    /* insert test data */
    for (i = 0; i < num; i++)
    {
        radix_tree_insert(head, i, test[i]);
    }

    /* gang lookup: starting from a given index, fetch up to max_items entries */
    void *result[5];

    int found = radix_tree_gang_lookup(head, result, 2, 5); /* search from index 2 onwards */

    for (i = 0; i < found; i++)
    {
        printf("%d :%s\n", i, (char *)result[i]);
    }

    /* look up each entry by its index */
    for(i = 0; i < num; i++)
    {
        printf("%s\n",(char*) radix_tree_lookup(head, i));
    }

    for(i = 0; i < num; i++)
    {
        radix_tree_delete(head, i);
    }

    return 0;
}
Example #5
static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
{
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	struct mlx5e_vxlan *vxlan;

	spin_lock_irq(&vxlan_db->lock);
	vxlan = radix_tree_delete(&vxlan_db->tree, port);
	spin_unlock_irq(&vxlan_db->lock);

	if (!vxlan)
		return;

	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);

	kfree(vxlan);
}
Example #6
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
Example #7
static void *writer_fn(void *arg)
{
	int i;

	rcu_register_thread();
	pthread_barrier_wait(&worker_barrier);

	for (i = 0; i < 1000000; i++) {
		radix_tree_insert(&mt_tree, 1, &obj1);
		radix_tree_delete(&mt_tree, 1);
	}

	rcu_unregister_thread();

	return NULL;
}
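
The stress test above exercises only the writer side of the RCU protocol. A hypothetical reader counterpart for the same harness (reader_fn and its assertion are assumptions, not part of the original test) would look up the slot under rcu_read_lock() and must tolerate finding it either populated or empty, since it races with the insert/delete loop:

static void *reader_fn(void *arg)
{
	int i;
	void *entry;

	rcu_register_thread();
	pthread_barrier_wait(&worker_barrier);

	for (i = 0; i < 1000000; i++) {
		rcu_read_lock();
		/* racing with the writer, so both outcomes are legal */
		entry = radix_tree_lookup(&mt_tree, 1);
		assert(entry == NULL || entry == &obj1);
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}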
Example #9
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, newkey);
		spin_unlock_irq(&btnc->tree_lock);
		unlock_page(ctxt->bh->b_page);
	} else
		brelse(nbh);
}
Example #10
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}
Example #11
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
    swp_entry_t entry;
    struct address_space *address_space;

    VM_BUG_ON_PAGE(!PageLocked(page), page);
    VM_BUG_ON_PAGE(!PageSwapCache(page), page);
    VM_BUG_ON_PAGE(PageWriteback(page), page);

    entry.val = page_private(page);
    address_space = swap_address_space(entry);
    radix_tree_delete(&address_space->page_tree, page_private(page));
    set_page_private(page, 0);
    ClearPageSwapCache(page);
    address_space->nrpages--;
    __dec_zone_page_state(page, NR_FILE_PAGES);
    INC_CACHE_INFO(del_total);
}
Example #12
struct mpls_ilm* 
mpls_remove_ilm (unsigned int key)
{
	struct mpls_ilm *ilm = NULL;

	MPLS_ENTER;
	ilm = radix_tree_delete (&mpls_ilm_tree, key);
	if (!ilm) {
		MPLS_DEBUG("node key %u not found.\n", key);
		MPLS_EXIT;
		return NULL;
	}

	list_del_rcu(&ilm->global);
	mpls_ilm_release (ilm);

	MPLS_EXIT;
	return ilm;
}
Example #13
/**
 * kill_cursor - free dir_cursor and reiser4_file_fsdata attached to it
 * @cursor: cursor to free
 *
 * Removes reiser4_file_fsdata attached to @cursor from the readdir list of
 * reiser4_inode and frees that reiser4_file_fsdata. Removes @cursor from the
 * index, hash table, list of unused cursors, and frees it.
 */
static void kill_cursor(dir_cursor *cursor)
{
	unsigned long index;

	assert("nikita-3566", cursor->ref == 0);
	assert("nikita-3572", cursor->fsdata != NULL);

	index = (unsigned long)cursor->key.oid;
	list_del_init(&cursor->fsdata->dir.linkage);
	free_fsdata(cursor->fsdata);
	cursor->fsdata = NULL;

	if (list_empty_careful(&cursor->list))
		/* this is last cursor for a file. Kill radix-tree entry */
		radix_tree_delete(&cursor->info->tree, index);
	else {
		void **slot;

		/*
		 * there are other cursors for the same oid.
		 */

		/*
		 * if the radix tree slot points to the cursor being removed,
		 * re-target the slot to the next cursor in the (non-empty, as
		 * was checked above) circular list of all cursors for this
		 * oid.
		 */
		slot = radix_tree_lookup_slot(&cursor->info->tree, index);
		assert("nikita-3571", *slot != NULL);
		if (*slot == cursor)
			*slot = list_entry(cursor->list.next, dir_cursor, list);
		/* remove cursor from circular list */
		list_del_init(&cursor->list);
	}
	/* remove cursor from the list of unused cursors */
	list_del_init(&cursor->alist);
	/* remove cursor from the hash table */
	d_cursor_hash_remove(&cursor->info->table, cursor);
	/* and free it */
	kmem_cache_free(d_cursor_cache, cursor);
	--d_cursor_unused;
}
Example #14
struct mpls_nhlfe* 
mpls_remove_nhlfe (unsigned int key)
{
	struct mpls_nhlfe *nhlfe = NULL;

	MPLS_ENTER;

	nhlfe = radix_tree_delete(&mpls_nhlfe_tree, key);
	if (!nhlfe) {
		MPLS_DEBUG("NHLFE node with key %u not found.\n", key);
		MPLS_EXIT;
		return NULL;
	}

	list_del_rcu(&nhlfe->global);

	/* release the refcnt for the tree hold it */
	mpls_nhlfe_release (nhlfe);

	MPLS_EXIT;
	return nhlfe;
}
Example #15
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
Example #16
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
Example #17
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	int ret;
	struct btrfs_block_group_cache *cache[8];
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(&info->block_group_radix,
					     (void **)cache, 0,
					     ARRAY_SIZE(cache));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			radix_tree_delete(&info->block_group_radix,
					  cache[i]->key.objectid +
					  cache[i]->key.offset - 1);
			free(cache[i]);
		}
	}
	return 0;
}
Example #18
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq, int flags)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);
	if (flags & MLX4_RCU_USE_EXPEDITED)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	mlx4_cq_free_icm(dev, cq->cqn);
}
Example #19
File: cq.c Project: 020gzh/linux
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_destroy_cq_mbox_in in;
	struct mlx5_destroy_cq_mbox_out out;
	struct mlx5_core_cq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	synchronize_irq(cq->irqn);

	mlx5_debug_cq_remove(dev, cq);
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	return 0;
}
Example #20
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_cq_table *cq_table = &priv->cq_table;
    int err;

    err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
    if (err)
        mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

    synchronize_irq(priv->eq_table.eq[cq->vector].irq);

    spin_lock_irq(&cq_table->lock);
    radix_tree_delete(&cq_table->tree, cq->cqn);
    spin_unlock_irq(&cq_table->lock);

    if (atomic_dec_and_test(&cq->refcount))
        complete(&cq->free);
    wait_for_completion(&cq->free);

    mlx4_cq_free_icm(dev, cq->cqn);
}
Example #21
File: dir.c Project: NVSL/NOVA
void nova_delete_dir_tree(struct super_block *sb,
	struct nova_inode_info_header *sih)
{
	struct nova_dentry *direntry;
	unsigned long pos = 0;
	struct nova_dentry *entries[FREE_BATCH];
	timing_t delete_time;
	int nr_entries;
	int i;
	void *ret;

	NOVA_START_TIMING(delete_dir_tree_t, delete_time);

	do {
		nr_entries = radix_tree_gang_lookup(&sih->tree,
					(void **)entries, pos, FREE_BATCH);
		for (i = 0; i < nr_entries; i++) {
			direntry = entries[i];
			BUG_ON(!direntry);
			pos = BKDRHash(direntry->name, direntry->name_len);
			ret = radix_tree_delete(&sih->tree, pos);
			if (!ret || ret != direntry) {
				nova_err(sb, "dentry: type %d, inode %llu, "
					"name %s, namelen %u, rec len %u\n",
					direntry->entry_type,
					le64_to_cpu(direntry->ino),
					direntry->name, direntry->name_len,
					le16_to_cpu(direntry->de_len));
				if (!ret)
					nova_dbg("ret is NULL\n");
			}
		}
		pos++;
	} while (nr_entries == FREE_BATCH);

	NOVA_END_TIMING(delete_dir_tree_t, delete_time);
	return;
}
Example #22
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}
Example #23
/*
 * Release any resource obtained during a mount.
 */
void
libxfs_umount(xfs_mount_t *mp)
{
	struct xfs_perag	*pag;
	int			agno;

	libxfs_rtmount_destroy(mp);
	libxfs_bcache_purge();

	for (agno = 0; agno < mp->m_maxagi; agno++) {
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		kmem_free(pag);
	}

	kmem_free(mp->m_attr_geo);
	kmem_free(mp->m_dir_geo);

	kmem_free(mp->m_rtdev_targp);
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		kmem_free(mp->m_logdev_targp);
	kmem_free(mp->m_ddev_targp);
}
Example #24
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}
Example #25
File: dir.c Project: NVSL/NOVA
static int nova_remove_dir_radix_tree(struct super_block *sb,
	struct nova_inode_info_header *sih, const char *name, int namelen,
	int replay)
{
	struct nova_dentry *entry;
	unsigned long hash;

	hash = BKDRHash(name, namelen);
	entry = radix_tree_delete(&sih->tree, hash);

	if (replay == 0) {
		if (!entry) {
			nova_dbg("%s ERROR: %s, length %d, hash %lu\n",
					__func__, name, namelen, hash);
			return -EINVAL;
		}

		if (entry->ino == 0 || entry->invalid ||
		    nova_check_dentry_match(sb, entry, name, namelen)) {
			nova_dbg("%s dentry not match: %s, length %d, "
					"hash %lu\n", __func__, name,
					namelen, hash);
			nova_dbg("dentry: type %d, inode %llu, name %s, "
					"namelen %u, rec len %u\n",
					entry->entry_type,
					le64_to_cpu(entry->ino),
					entry->name, entry->name_len,
					le16_to_cpu(entry->de_len));
			return -EINVAL;
		}

		/* No need to flush */
		entry->invalid = 1;
	}

	return 0;
}
Example #26
/**
 * nilfs_btnode_commit_change_key
 *  commit the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct page *opage;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		opage = obh->b_page;
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);
		if (!test_set_buffer_dirty(obh) && TestSetPageDirty(opage))
			BUG();

		WRITE_LOCK_IRQ(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, oldkey);
		radix_tree_tag_set(&btnc->page_tree, newkey,
				   PAGECACHE_TAG_DIRTY);
		WRITE_UNLOCK_IRQ(&btnc->tree_lock);

		opage->index = obh->b_blocknr = newkey;
		unlock_page(opage);
	} else {
		nilfs_copy_buffer(nbh, obh);
		nilfs_btnode_mark_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}
Example #27
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct page *opage;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		opage = obh->b_page;
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);
		mark_buffer_dirty(obh);

		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, oldkey);
		radix_tree_tag_set(&btnc->page_tree, newkey,
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&btnc->tree_lock);

		opage->index = obh->b_blocknr = newkey;
		unlock_page(opage);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh);	/* will decrement bh->b_count */
	}
}
Example #28
static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
Example #29
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;

		xfs_promote_inode(ip);
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
	if (sync_mode & SYNC_WAIT) {
		if (error == EAGAIN) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			delay(2);
			goto restart;
		}
		xfs_iflock(ip);
		goto reclaim;
	}

	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_warn(ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);

	return error;
}
Example #30
static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
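
For symmetry, the matching insertion helper in kernel/irq/irqdesc.c is an equally thin wrapper around the radix tree; a sketch of it:

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

Both helpers rely on the caller (the sparse irq descriptor allocation code) to provide locking, which is why neither takes a lock of its own.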