Example No. 1
0
int bufferfork_to_invalidate(map_t *map, struct buffer_head *buffer)
{
	unsigned delta = tux3_inode_delta(map->inode);

	/*
	 * Userland shouldn't need to buffer fork on the truncate
	 * path, because there is no async backend.  So just make
	 * sure of that here.
	 */
	assert(buffer_can_modify(buffer, delta));

	return 0;
}
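
To make the assertion above concrete, below is a minimal, self-contained sketch of the kind of check buffer_can_modify() implies: a buffer can be modified in place when it is clean, or when it was dirtied by the same delta that is modifying it now; otherwise a fork would be needed. The struct, helper name, and DELTA_MASK value are illustrative assumptions, not tux3's real buffer API.

#include <assert.h>
#include <stdio.h>

#define DELTA_MASK	3	/* assumed: deltas compared modulo a small cycle */

struct fork_check_buffer {
	unsigned dirty_delta;	/* delta that last dirtied this buffer */
	int dirty;
};

/*
 * A buffer is modifiable without forking if it is clean, or was
 * dirtied in the same delta that is modifying it now.
 */
static int fork_check_can_modify(struct fork_check_buffer *buf, unsigned delta)
{
	return !buf->dirty ||
		(buf->dirty_delta & DELTA_MASK) == (delta & DELTA_MASK);
}

int main(void)
{
	struct fork_check_buffer buf = { .dirty_delta = 5, .dirty = 1 };

	assert(fork_check_can_modify(&buf, 5));		/* same delta: ok */
	assert(!fork_check_can_modify(&buf, 6));	/* newer delta: would need a fork */
	printf("buffer fork check sketch ok\n");
	return 0;
}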
Example No. 2
0
void tux3_iattrdirty(struct inode *inode)
{
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned delta = tux3_inode_delta(inode);
	unsigned flags = tuxnode->flags;

	/* If dirtied on this delta, nothing to do */
	if (tux3_iattrsta_has_delta(flags) &&
	    tux3_iattrsta_get_delta(flags) == tux3_delta(delta))
		return;

	trace("inum %Lu, delta %u", tuxnode->inum, delta);

	spin_lock(&tuxnode->lock);
	flags = tuxnode->flags;
	if (S_ISREG(inode->i_mode) || tux3_iattrsta_has_delta(flags)) {
		unsigned old_delta;

		/*
		 * For a regular file, even if the iattrs are clean,
		 * we have to provide stable idata for the backend.
		 *
		 * The backend may be committing data pages. If so,
		 * it has to check idata->i_size and may save the
		 * dtree root, but the previous delta doesn't have
		 * stable iattrs.
		 *
		 * So this provides stable iattrs for a regular file
		 * even if the previous delta is clean.
		 *
		 * Other types don't have this problem, because they
		 * either:
		 * - never dirty iattrs (e.g. volmap), so the iattrs
		 *   are always stable, or
		 * - dirty iattrs together with data, e.g. a directory
		 *   updates its timestamp along with its data blocks.
		 */
		if (S_ISREG(inode->i_mode) && !tux3_iattrsta_has_delta(flags))
			old_delta = tux3_delta(delta - 1);
		else
			old_delta = tux3_iattrsta_get_delta(flags);

		/* If the delta differs, the iattrs were stabilized. Copy them. */
		if (old_delta != tux3_delta(delta)) {
			struct tux3_iattr_data *idata =
				&tux3_inode_ddc(inode, old_delta)->idata;
			idata_copy(inode, idata);
		}
	}
	/* Update iattr state to current delta */
	tuxnode->flags = tux3_iattrsta_update(flags, delta);
	spin_unlock(&tuxnode->lock);
}
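
The helpers tux3_iattrsta_has_delta(), tux3_iattrsta_get_delta() and tux3_iattrsta_update() suggest that the iattr dirty state and its delta are packed into the inode flags word. The sketch below shows one plausible encoding of that idea; the bit layout, masks, and all sketch_* names are assumptions for illustration only, not tux3's actual representation.

#include <assert.h>
#include <stdio.h>

#define IATTR_HAS_DELTA		0x1u
#define IATTR_DELTA_SHIFT	1
#define IATTR_DELTA_MASK	0x3u	/* assumed: delta kept modulo 4 */

static unsigned sketch_delta(unsigned delta)
{
	return delta & IATTR_DELTA_MASK;
}

static int sketch_has_delta(unsigned flags)
{
	return flags & IATTR_HAS_DELTA;
}

static unsigned sketch_get_delta(unsigned flags)
{
	return (flags >> IATTR_DELTA_SHIFT) & IATTR_DELTA_MASK;
}

/* Record that the iattrs were dirtied in @delta. */
static unsigned sketch_update(unsigned flags, unsigned delta)
{
	flags &= ~(IATTR_DELTA_MASK << IATTR_DELTA_SHIFT);
	return flags | IATTR_HAS_DELTA | (sketch_delta(delta) << IATTR_DELTA_SHIFT);
}

int main(void)
{
	unsigned flags = 0;

	assert(!sketch_has_delta(flags));
	flags = sketch_update(flags, 7);
	assert(sketch_has_delta(flags));
	assert(sketch_get_delta(flags) == sketch_delta(7));
	printf("iattr delta-state sketch ok\n");
	return 0;
}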
Example No. 3
0
int bufferfork_to_invalidate(map_t *map, struct buffer_head *buffer)
{
	if (DEBUG_MODE_U == 1) {
		printf("\t\t\t\t%25s[U]  %25s  %4d  #in\n",
		       __FILE__, __func__, __LINE__);
	}
	unsigned delta = tux3_inode_delta(map->inode);
	/*
	 * Userland shouldn't need to buffer fork on the truncate
	 * path, because there is no async backend.  So just make
	 * sure of that here.
	 */
	assert(buffer_can_modify(buffer, delta));

	return 0;
}
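
The open-coded entry trace in this variant could also be factored into a macro so every instrumented function shares one format string. The trace_in() name below is an assumption for illustration; only DEBUG_MODE_U and the printf format are taken from the example.

#include <stdio.h>

#define DEBUG_MODE_U 1

/* Print the same "#in" entry trace as the open-coded block above. */
#define trace_in()							\
	do {								\
		if (DEBUG_MODE_U == 1)					\
			printf("\t\t\t\t%25s[U]  %25s  %4d  #in\n",	\
			       __FILE__, __func__, __LINE__);		\
	} while (0)

static int example_function(void)
{
	trace_in();	/* same output as the inlined if-block */
	return 0;
}

int main(void)
{
	return example_function();
}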
Example No. 4
0
/*
 * Mark the inode dirty for deletion (called from ->drop_inode()).
 * The caller must hold inode->i_lock.
 */
void tux3_mark_inode_to_delete(struct inode *inode)
{
	struct sb *sb = tux_sb(inode->i_sb);
	struct tux3_inode *tuxnode = tux_inode(inode);
	unsigned delta;

	/* inode has dead mark already */
	if (tux3_inode_is_dead(tuxnode))
		return;

	change_begin_atomic(sb);

	delta = tux3_inode_delta(inode);
	__tux3_mark_inode_to_delete(inode, delta);

	/*
	 * Hack: this is called under inode->i_lock, so we have to
	 * release inode->i_lock to call mark_inode_dirty_sync().
	 *
	 * FIXME: we want to set I_DIRTY_SYNC (which keeps the inode
	 * from being freed) and wake up the flusher if needed, while
	 * preventing the inode from being freed. We need a better
	 * way to do this.
	 */
	if (!(tux3_dirty_flags(inode, delta) & I_DIRTY_SYNC)) {
		/* FIXME: I_REFERENCED can't prevent this completely */
		//inode->i_state |= I_REFERENCED;
		/* FIXME: I_WILL_FREE will interfere with igrab() taking a reference */
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);

		/* Tell the backend about the dead inode by marking it dirty. */
		tux3_mark_inode_dirty_sync(inode);

		spin_lock(&inode->i_lock);
		inode->i_state &= ~I_WILL_FREE;
#ifdef __KERNEL__
		wake_up_bit(&inode->i_state, __I_NEW);
#endif
	}

	change_end_atomic(sb);
}
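
The lock juggling above follows a common pattern: set a flag while the lock is held so other paths leave the object alone, drop the lock to call a routine that must not run under it, then retake the lock and clear the flag. Below is a self-contained userland sketch of that pattern using a pthread mutex; the structure, flag, and helper names are illustrative assumptions, not tux3 code.

#include <pthread.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t lock;
	unsigned state;
};

#define FAKE_I_WILL_FREE	0x1u

/* Stand-in for a routine that must not run under inode->lock
 * because it takes the lock itself. */
static void mark_dirty_sync(struct fake_inode *inode)
{
	pthread_mutex_lock(&inode->lock);
	printf("marked dirty, state=0x%x\n", inode->state);
	pthread_mutex_unlock(&inode->lock);
}

/* Called with inode->lock held, like ->drop_inode() above. */
static void mark_to_delete_locked(struct fake_inode *inode)
{
	inode->state |= FAKE_I_WILL_FREE;	/* keep the inode pinned across the unlock */
	pthread_mutex_unlock(&inode->lock);	/* helper must not run under the lock */

	mark_dirty_sync(inode);

	pthread_mutex_lock(&inode->lock);
	inode->state &= ~FAKE_I_WILL_FREE;
}

int main(void)
{
	struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&inode.lock);
	mark_to_delete_locked(&inode);
	pthread_mutex_unlock(&inode.lock);
	return 0;
}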
Example No. 5
0
/*
 * Invalidate a buffer. This must be called from the frontend, e.g. truncate.
 * The caller must hold lock_page(), and page->mapping must be valid.
 */
void tux3_invalidate_buffer(struct buffer_head *buffer)
{
	unsigned delta = tux3_inode_delta(buffer_inode(buffer));

	tux3_clear_buffer_dirty(buffer, delta);
	discard_buffer(buffer);
}
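
The order of the two calls above matters: the buffer is first removed from the current delta's dirty accounting, then its mapping state is discarded. Below is a minimal userland sketch of that ordering with stand-in structures; all names and fields are illustrative assumptions, not tux3's buffer layer.

#include <assert.h>
#include <stdio.h>

struct inval_buffer {
	int dirty;
	unsigned dirty_delta;
	int mapped;
};

/* Drop the buffer from this delta's dirty tracking. */
static void inval_clear_dirty(struct inval_buffer *buf, unsigned delta)
{
	if (buf->dirty && buf->dirty_delta == delta)
		buf->dirty = 0;
}

/* Forget the buffer's on-disk mapping state. */
static void inval_discard(struct inval_buffer *buf)
{
	buf->mapped = 0;
}

static void inval_invalidate(struct inval_buffer *buf, unsigned delta)
{
	/* Order matters: stop tracking the buffer as dirty before discarding. */
	inval_clear_dirty(buf, delta);
	inval_discard(buf);
}

int main(void)
{
	struct inval_buffer buf = { .dirty = 1, .dirty_delta = 3, .mapped = 1 };

	inval_invalidate(&buf, 3);
	assert(!buf.dirty && !buf.mapped);
	printf("invalidate sketch ok\n");
	return 0;
}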