Example #1: propagate_one()
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (m->mnt_group_id == last_dest->mnt_group_id) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
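		/* climb m's chain of masters until we hit dest_master or a
		 * master that is already marked (i.e. one we have already
		 * propagated to), then realign last_source/last_dest with
		 * that peer group */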
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p)) {
				while (last_dest->mnt_master != p) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				if (n->mnt_group_id != last_dest->mnt_group_id) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				break;
			}
		}
		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return 0;
}
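
These examples share one locking idiom: taking the read side of a seqlock exclusively.  Here, propagate_one() flips the mark on a master mount under read_seqlock_excl(&mount_lock), which takes the seqlock's spinlock without bumping the sequence counter, so writers are excluded while lockless read_seqbegin() readers are not forced to retry.  A minimal sketch of that idiom, using a hypothetical example_lock/example_flags pair rather than the real mount_lock:

#include <linux/seqlock.h>
#include <linux/bitops.h>

static DEFINE_SEQLOCK(example_lock);
static unsigned long example_flags;	/* hypothetical stand-in for mount marks */

static void example_set_mark(void)
{
	/* excludes seqlock writers without incrementing the sequence
	 * counter, so concurrent read_seqbegin() readers need not retry */
	read_seqlock_excl(&example_lock);
	__set_bit(0, &example_flags);
	read_sequnlock_excl(&example_lock);
}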
Example #2: afs_validate()
/*
 * validate a vnode/inode
 * - there are several things we need to check
 *   - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
 *     symlink)
 *   - parent dir metadata changed (security changes)
 *   - dentry data changed (write, truncate)
 *   - dentry metadata changed (security changes)
 */
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
	time64_t now = ktime_get_real_seconds();
	bool valid = false;
	int ret;

	_enter("{v={%x:%u} fl=%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, vnode->flags,
	       key_serial(key));

	/* Quickly check the callback state.  Ideally, we'd use read_seqbegin
	 * here, but we have no way to pass the net namespace to the RCU
	 * cleanup for the server record.
	 */
	read_seqlock_excl(&vnode->cb_lock);

	if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
		if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) {
			vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
		} else if (!test_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags) &&
			   !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
			   vnode->cb_expires_at - 10 > now) {
			valid = true;
		}
	} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		valid = true;
	}

	read_sequnlock_excl(&vnode->cb_lock);

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		clear_nlink(&vnode->vfs_inode);

	if (valid)
		goto valid;

	mutex_lock(&vnode->validate_lock);

	/* if the promise has expired, we need to check the server again to get
	 * a new promise - note that if the (parent) directory's metadata was
	 * changed then the security may be different and we may no longer have
	 * access */
	if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
		_debug("not promised");
		ret = afs_fetch_status(vnode, key);
		if (ret < 0) {
			if (ret == -ENOENT) {
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
			goto error_unlock;
		}
		_debug("new promise [fl=%lx]", vnode->flags);
	}

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_debug("file already deleted");
		ret = -ESTALE;
		goto error_unlock;
	}

	/* if the vnode's data version number changed then its contents are
	 * different */
	if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
		afs_zap_data(vnode);

	clear_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags);
	mutex_unlock(&vnode->validate_lock);
valid:
	_leave(" = 0");
	return 0;

error_unlock:
	mutex_unlock(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
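
afs_validate() works in two tiers: a cheap check of the cached callback state under an exclusive seqlock read, and, only if that check fails, a slow revalidation against the server serialized by validate_lock.  A minimal sketch of the same shape, assuming hypothetical example_* names:

#include <linux/seqlock.h>
#include <linux/mutex.h>

struct example_obj {
	seqlock_t	lock;			/* guards the cached verdict */
	struct mutex	revalidate_lock;	/* serializes the slow path */
	bool		cached_ok;
};

/* caller is assumed to have done seqlock_init() and mutex_init() */
static int example_validate(struct example_obj *obj)
{
	bool valid;

	/* fast path: sample the cached state while excluding writers */
	read_seqlock_excl(&obj->lock);
	valid = obj->cached_ok;
	read_sequnlock_excl(&obj->lock);
	if (valid)
		return 0;

	/* slow path: one revalidation at a time */
	mutex_lock(&obj->revalidate_lock);
	/* ... refetch the authoritative state here ... */
	obj->cached_ok = true;
	mutex_unlock(&obj->revalidate_lock);
	return 0;
}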
Example #3: afs_manage_cells()
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			time64_t expire_at = cell->last_inactive;

			if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
			    !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
				expire_at += afs_cell_gc_delay;
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			if (cell->dns_expiry <= now)
				sched_cell = true;
			else if (cell->dns_expiry <= next_manage)
				next_manage = cell->dns_expiry;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
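
The cells_outstanding accounting above follows a simple contract: whoever queues the manager takes a count, and the work item consumes that count on the way out (via afs_dec_cells_outstanding()), taking a fresh one only if it successfully requeues itself.  A stripped-down sketch of that contract, with hypothetical example_* names:

#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_t example_outstanding = ATOMIC_INIT(0);
static void example_worker(struct work_struct *work);
static DECLARE_WORK(example_work, example_worker);

static void example_kick(void)
{
	/* queue_work() returns false if the work was already pending, so
	 * a count is taken only when a new run was actually queued */
	if (queue_work(system_wq, &example_work))
		atomic_inc(&example_outstanding);
}

static void example_worker(struct work_struct *work)
{
	/* ... trawl the records, possibly calling example_kick() ... */
	atomic_dec(&example_outstanding);	/* consume the queued count */
}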
Example #4: afs_inode_map_status()
/*
 * map the AFS file status to the inode member variables
 */
static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
{
	struct inode *inode = AFS_VNODE_TO_I(vnode);
	bool changed;

	_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
	       vnode->status.type,
	       vnode->status.nlink,
	       (unsigned long long) vnode->status.size,
	       vnode->status.data_version,
	       vnode->status.mode);

	read_seqlock_excl(&vnode->cb_lock);

	switch (vnode->status.type) {
	case AFS_FTYPE_FILE:
		inode->i_mode	= S_IFREG | vnode->status.mode;
		inode->i_op	= &afs_file_inode_operations;
		inode->i_fop	= &afs_file_operations;
		break;
	case AFS_FTYPE_DIR:
		inode->i_mode	= S_IFDIR | vnode->status.mode;
		inode->i_op	= &afs_dir_inode_operations;
		inode->i_fop	= &afs_dir_file_operations;
		break;
	case AFS_FTYPE_SYMLINK:
		/* Symlinks with a mode of 0644 are actually mountpoints. */
		if ((vnode->status.mode & 0777) == 0644) {
			inode->i_flags |= S_AUTOMOUNT;

			set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);

			inode->i_mode	= S_IFDIR | 0555;
			inode->i_op	= &afs_mntpt_inode_operations;
			inode->i_fop	= &afs_mntpt_file_operations;
		} else {
			inode->i_mode	= S_IFLNK | vnode->status.mode;
			inode->i_op	= &afs_symlink_inode_operations;
		}
		inode_nohighmem(inode);
		break;
	default:
		printk("kAFS: AFS vnode with undefined type\n");
		read_sequnlock_excl(&vnode->cb_lock);
		return -EBADMSG;
	}

	changed = (vnode->status.size != inode->i_size);

	set_nlink(inode, vnode->status.nlink);
	inode->i_uid		= vnode->status.owner;
	inode->i_gid            = vnode->status.group;
	inode->i_size		= vnode->status.size;
	inode->i_ctime.tv_sec	= vnode->status.mtime_client;
	inode->i_ctime.tv_nsec	= 0;
	inode->i_atime		= inode->i_mtime = inode->i_ctime;
	inode->i_blocks		= 0;
	inode->i_generation	= vnode->fid.unique;
	inode_set_iversion_raw(inode, vnode->status.data_version);
	inode->i_mapping->a_ops	= &afs_fs_aops;

	read_sequnlock_excl(&vnode->cb_lock);

#ifdef CONFIG_AFS_FSCACHE
	if (changed)
		fscache_attr_changed(vnode->cache);
#endif
	return 0;
}
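
Two details in afs_inode_map_status() deserve a note.  First, AFS encodes mountpoints as symlinks whose low nine mode bits are exactly 0644, which is why the symlink case rewrites the inode into an automount directory.  A hypothetical helper spelling the test out:

#include <linux/types.h>

/* hypothetical: true if an AFS symlink's mode marks it as a mountpoint */
static bool example_is_afs_mountpoint(umode_t mode)
{
	return (mode & 0777) == 0644;
}

Second, changed is sampled inside the locked section, but fscache_attr_changed() runs only after read_sequnlock_excl(): the exclusive read side holds the seqlock's spinlock, which must not be held across a call that may sleep.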