Example 1
/*
 * these are necessary regardless of whether sysfs is disabled.
 */
void au_si_free(struct kobject *kobj)
{
	struct au_sbinfo *sbinfo;
	char *locked __maybe_unused; /* debug only */

	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
	AuDebugOn(!list_empty(&sbinfo->si_plink.head));
	AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));

	au_rw_write_lock(&sbinfo->si_rwsem);
	au_br_free(sbinfo);
	au_rw_write_unlock(&sbinfo->si_rwsem);

	AuDebugOn(radix_tree_gang_lookup
		  (&sbinfo->au_si_pid.tree, (void **)&locked,
		   /*first_index*/PID_MAX_DEFAULT - 1,
		   /*max_items*/sizeof(locked)/sizeof(*locked)));

	kfree(sbinfo->si_branch);
	kfree(sbinfo->au_si_pid.bitmap);
	mutex_destroy(&sbinfo->si_xib_mtx);
	AuRwDestroy(&sbinfo->si_rwsem);

	kfree(sbinfo);
}
Example 2
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
			       btrfs_root *root)
{
	unsigned long gang[8];
	u64 first = 0;
	int ret;
	int i;

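	/*
	 * pinned_radix apparently stores the block number itself as each
	 * entry, so a value the gang lookup returns can be handed straight
	 * back to radix_tree_delete() as the index.
	 */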
	while(1) {
		ret = radix_tree_gang_lookup(&root->fs_info->pinned_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		if (!first)
			first = gang[0];
		for (i = 0; i < ret; i++) {
			radix_tree_delete(&root->fs_info->pinned_radix,
					  gang[i]);
		}
	}
	root->fs_info->last_insert.objectid = first;
	root->fs_info->last_insert.offset = 0;
	return 0;
}
Example 3
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
static int pmfs_empty_dir(struct inode *inode)
{
	struct super_block *sb;
	struct pmfs_inode_info *si = PMFS_I(inode);
	struct pmfs_inode_info_header *sih = si->header;
	struct pmfs_dir_logentry *entry;
	unsigned long pos = 0;
	struct pmfs_dir_logentry *entries[4];
	int nr_entries;
	int i;

	sb = inode->i_sb;
	nr_entries = radix_tree_gang_lookup(&sih->tree,
					(void **)entries, pos, 4);
	if (nr_entries > 2)
		return 0;

	for (i = 0; i < nr_entries; i++) {
		entry = entries[i];
		if (!is_dir_init_entry(sb, entry))
			return 0;
	}

	return 1;
}
Example 4
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	struct mlx5e_vxlan *vxlan;
	unsigned int port = 0;

	spin_lock_irq(&vxlan_db->lock);
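	/*
	 * The per-port teardown is done with the spinlock dropped (the
	 * delete path presumably sleeps), so the lookup is restarted from
	 * the port just removed after re-taking the lock.
	 */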
	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
		port = vxlan->udp_port;
		spin_unlock_irq(&vxlan_db->lock);
		__mlx5e_vxlan_core_del_port(priv, (u16)port);
		spin_lock_irq(&vxlan_db->lock);
	}
	spin_unlock_irq(&vxlan_db->lock);
}
Example 5
#include <stdio.h>
#include "radix_tree.h"    /* assumed header name for this user-space radix tree */

int main(void)
{
    char *test[] = {"abc", "def", "ghi", "jkl", "mno", "pqr", "stu", "vwx", "yz0",
                    "123", "456", "789", "zyx", "wvu", "tsr", "qpo", "nml", "kji"};

    int i = 0;
    int num = sizeof(test)/sizeof(*test);

    printf("num:%d\n", num);

    radix_tree_head *head = radix_tree_head_new();
    if (!head)
    {
        printf("alloc head failed\n");
        return 1;
    }
    radix_tree_initial(head);

    /* insertion test */
    for (i = 0; i < num; i++)
    {
        radix_tree_insert(head, i, test[i]);
    }

    /* gang lookup: fetch up to max_items entries, starting at the given index */
    void *result[5];
    int found = radix_tree_gang_lookup(head, result, 2, 5); /* search from index 2 */

    for (i = 0; i < found; i++)
    {
        printf("%d :%s\n", i, (char *)result[i]);
    }

    /* look up each entry by its index */
    for (i = 0; i < num; i++)
    {
        printf("%s\n", (char *)radix_tree_lookup(head, i));
    }

    for (i = 0; i < num; i++)
    {
        radix_tree_delete(head, i);
    }

    return 0;
}
Example 6
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
	struct mlx5e_vxlan *vxlan;
	unsigned int idx = 0;

	mlx5_vxlan_debugfs_cleanup(priv->mdev);
	spin_lock_irq(&vxlan_db->lock);
	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, idx, 1)) {
		spin_unlock_irq(&vxlan_db->lock);
		idx = vxlan->udp_port;
		__mlx5e_vxlan_del_port(priv, vxlan->udp_port);
		spin_lock_irq(&vxlan_db->lock);
	}
	spin_unlock_irq(&vxlan_db->lock);
}
Example 7
static int pci_segments_iterate(
    int (*handler)(struct pci_seg *, void *), void *arg)
{
    u16 seg = 0;
    int rc = 0;

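    /*
     * Fetch one segment at a time, resuming just past the previous one.
     * seg is a u16, so it wraps to 0 after segment 0xffff; the "&& seg"
     * condition below is what then terminates the walk.
     */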
    do {
        struct pci_seg *pseg;

        if ( !radix_tree_gang_lookup(&pci_segments, (void **)&pseg, seg, 1) )
            break;
        rc = handler(pseg, arg);
        seg = pseg->nr + 1;
    } while (!rc && seg);

    return rc;
}
Example 8
File: dir.c, Project: NVSL/NOVA
static u64 nova_find_next_dentry_addr(struct super_block *sb,
	struct nova_inode_info_header *sih, u64 pos)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct nova_file_write_entry *entry = NULL;
	struct nova_file_write_entry *entries[1];
	int nr_entries;
	u64 addr = 0;

	nr_entries = radix_tree_gang_lookup(&sih->tree,
					(void **)entries, pos, 1);
	if (nr_entries == 1) {
		entry = entries[0];
		addr = nova_get_addr_off(sbi, entry);
	}

	return addr;
}
Example 9
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	int ret;
	struct btrfs_block_group_cache *cache[8];
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(&info->block_group_radix,
					     (void **)cache, 0,
					     ARRAY_SIZE(cache));
		if (!ret)
			break;
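		/*
		 * Block groups are indexed by the last block they cover,
		 * hence objectid + offset - 1 as the key to delete.
		 */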
		for (i = 0; i < ret; i++) {
			radix_tree_delete(&info->block_group_radix,
					  cache[i]->key.objectid +
					  cache[i]->key.offset - 1);
			free(cache[i]);
		}
	}
	return 0;
}
Example 10
File: dir.c, Project: NVSL/NOVA
void nova_delete_dir_tree(struct super_block *sb,
	struct nova_inode_info_header *sih)
{
	struct nova_dentry *direntry;
	unsigned long pos = 0;
	struct nova_dentry *entries[FREE_BATCH];
	timing_t delete_time;
	int nr_entries;
	int i;
	void *ret;

	NOVA_START_TIMING(delete_dir_tree_t, delete_time);

	do {
		nr_entries = radix_tree_gang_lookup(&sih->tree,
					(void **)entries, pos, FREE_BATCH);
		for (i = 0; i < nr_entries; i++) {
			direntry = entries[i];
			BUG_ON(!direntry);
			pos = BKDRHash(direntry->name, direntry->name_len);
			ret = radix_tree_delete(&sih->tree, pos);
			if (!ret || ret != direntry) {
				nova_err(sb, "dentry: type %d, inode %llu, "
					"name %s, namelen %u, rec len %u\n",
					direntry->entry_type,
					le64_to_cpu(direntry->ino),
					direntry->name, direntry->name_len,
					le16_to_cpu(direntry->de_len));
				if (!ret)
					nova_dbg("ret is NULL\n");
			}
		}
		pos++;
	} while (nr_entries == FREE_BATCH);

	NOVA_END_TIMING(delete_dir_tree_t, delete_time);
	return;
}
Example 11
STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * use a gang lookup to find the next inode in the tree
	 * as the tree is sparse and a gang lookup walks to find
	 * the number of objects requested.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows
	 * into the next AG range which can occur if we have inodes
	 * in the last block of the AG and we are currently
	 * pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
}
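The comment in this example spells out the pattern that recurs throughout this page: the tree is sparse, so callers fetch a small batch (often a single item) and restart the search just past the last key they saw. A minimal sketch of that loop, assuming a hypothetical item type that records its own index (the gang lookup returns only pointers, not the keys they were stored under):

struct item {
	unsigned long index;	/* key the item was inserted under */
	/* ... payload ... */
};

static void walk_all(struct radix_tree_root *root,
		     void (*visit)(struct item *))
{
	struct item *batch[16];
	unsigned long next = 0;
	int nr, i;

	while ((nr = radix_tree_gang_lookup(root, (void **)batch,
					    next, 16)) > 0) {
		for (i = 0; i < nr; i++)
			visit(batch[i]);
		/* resume past the last key seen; stop rather than wrap
		 * around the top of the index space */
		if (batch[nr - 1]->index + 1 == 0)
			break;
		next = batch[nr - 1]->index + 1;
	}
}

The examples above and below are all variations on this loop; what differs is the batch size, the locking around the lookup, and how the next index is derived from the returned objects.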
Example 12
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 blocknr, u64 num, int alloc)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num;
	u64 old_val;
	u64 block_in_group;
	int ret;

	while(total) {
		ret = radix_tree_gang_lookup(&info->block_group_radix,
					     (void **)&cache, blocknr, 1);
		if (!ret)
			return -1;
		radix_tree_tag_set(&info->block_group_radix,
				   cache->key.objectid + cache->key.offset - 1,
				   BTRFS_BLOCK_GROUP_DIRTY);

		block_in_group = blocknr - cache->key.objectid;
		old_val = btrfs_block_group_used(&cache->item);
		if (total > cache->key.offset - block_in_group)
			num = cache->key.offset - block_in_group;
		else
			num = total;
		total -= num;
		blocknr += num;
		if (alloc)
			old_val += num;
		else
			old_val -= num;
		btrfs_set_block_group_used(&cache->item, old_val);
	}
	return 0;
}
Example 13
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = XFS_DQUOT_TREE(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}
Example 14
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
		 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
Example 15
File: dir.c, Project: NVSL/NOVA
static int nova_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct nova_inode *pidir;
	struct nova_inode_info *si = NOVA_I(inode);
	struct nova_inode_info_header *sih = &si->header;
	struct nova_inode *child_pi;
	struct nova_dentry *entry;
	struct nova_dentry *entries[FREE_BATCH];
	int nr_entries;
	u64 pi_addr;
	unsigned long pos = 0;
	ino_t ino;
	int i;
	int ret;
	timing_t readdir_time;

	NOVA_START_TIMING(readdir_t, readdir_time);
	pidir = nova_get_inode(sb, inode);
	nova_dbgv("%s: ino %llu, size %llu, pos %llu\n",
			__func__, (u64)inode->i_ino,
			pidir->i_size, ctx->pos);

	if (!sih) {
		nova_dbg("%s: inode %lu sih does not exist!\n",
				__func__, inode->i_ino);
		ctx->pos = READDIR_END;
		return 0;
	}

	pos = ctx->pos;
	if (pos == READDIR_END)
		goto out;

	do {
		nr_entries = radix_tree_gang_lookup(&sih->tree,
					(void **)entries, pos, FREE_BATCH);
		for (i = 0; i < nr_entries; i++) {
			entry = entries[i];
			pos = BKDRHash(entry->name, entry->name_len);
			ino = __le64_to_cpu(entry->ino);
			if (ino == 0)
				continue;

			ret = nova_get_inode_address(sb, ino, &pi_addr, 0);
			if (ret) {
				nova_dbg("%s: get child inode %lu address "
					"failed %d\n", __func__, ino, ret);
				ctx->pos = READDIR_END;
				return ret;
			}

			child_pi = nova_get_block(sb, pi_addr);
			nova_dbgv("ctx: ino %llu, name %s, "
				"name_len %u, de_len %u\n",
				(u64)ino, entry->name, entry->name_len,
				entry->de_len);
			if (!dir_emit(ctx, entry->name, entry->name_len,
				ino, IF2DT(le16_to_cpu(child_pi->i_mode)))) {
				nova_dbgv("Here: pos %llu\n", ctx->pos);
				return 0;
			}
			ctx->pos = pos + 1;
		}
		pos++;
	} while (nr_entries == FREE_BATCH);

out:
	NOVA_END_TIMING(readdir_t, readdir_time);
	return 0;
}
Example 16
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
Example 17
/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		/* avoid new or bad inodes */
		if (is_bad_inode(inode) ||
		    xfs_iflags_test(ip, XFS_INEW)) {
			IRELE(ip);
			continue;
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_ilock(ip, XFS_IOLOCK_SHARED);
			lock_flags |= XFS_IOLOCK_SHARED;
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				xfs_ioend_wait(ip);
		}
		xfs_ilock(ip, XFS_ILOCK_SHARED);

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}