Code Example #1
long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;
	
	if (live_transaction()){
		/* DEP 5/27/10 - Defer fsync until commit. */
		struct deferred_object_operation *def_op;
		txobj_thread_list_node_t *list_node = workset_has_object(&file->f_mapping->host->xobj);

		if (!list_node) {
			tx_cache_get_file_ro(file);
			tx_cache_get_inode_ro(file->f_mapping->host);
			list_node = workset_has_object(&file->f_mapping->host->xobj); 
		}

		def_op = alloc_deferred_object_operation();
		INIT_LIST_HEAD(&def_op->list);
		def_op->type = DEFERRED_TYPE_FSYNC;
		def_op->u.fsync.datasync = datasync;
		def_op->u.fsync.file = file;

		/* DEP: Pin the file until the sync is executed */
		tx_atomic_inc_not_zero(&file->f_count);

		/* XXX: Could probably use something finer-grained here. */
		WORKSET_LOCK(current->transaction);
		list_add(&def_op->list, &list_node->deferred_operations);
		WORKSET_UNLOCK(current->transaction);
		return 0;
	}

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	if (!committing_transaction())
		mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file_get_dentry(file), datasync);
	if (!ret)
		ret = err;
	if (!committing_transaction())
		mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
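
The deferral branch at the top of do_fsync() only queues a DEFERRED_TYPE_FSYNC record on the transaction's workset; the code that later replays those records is not shown here. The following is a rough, non-authoritative sketch of what a commit-time drain could look like. The function name replay_deferred_fsyncs() and the helper free_deferred_object_operation() are assumptions (the latter mirroring alloc_deferred_object_operation() above); everything else reuses names from the listing.

/*
 * Hypothetical commit-time drain of the deferred_operations list that
 * do_fsync() populates above.  Sketch only: replay_deferred_fsyncs() and
 * free_deferred_object_operation() are assumed names, not taken from the
 * listing.
 */
static void replay_deferred_fsyncs(txobj_thread_list_node_t *list_node)
{
	struct deferred_object_operation *def_op, *tmp;
	LIST_HEAD(local);

	/* Detach the queued operations while holding the workset lock. */
	WORKSET_LOCK(current->transaction);
	list_splice_init(&list_node->deferred_operations, &local);
	WORKSET_UNLOCK(current->transaction);

	list_for_each_entry_safe(def_op, tmp, &local, list) {
		list_del(&def_op->list);
		if (def_op->type == DEFERRED_TYPE_FSYNC) {
			/* The transaction is no longer live here, so this
			 * takes the ordinary fsync path in do_fsync(). */
			do_fsync(def_op->u.fsync.file,
				 def_op->u.fsync.datasync);
			/* Drop the pin taken on file->f_count above. */
			fput(def_op->u.fsync.file);
		}
		free_deferred_object_operation(def_op);	/* assumed helper */
	}
}

Splicing the entries onto a local list keeps the hold time on the workset lock short, in the spirit of the XXX comment above about wanting something finer-grained.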
Code Example #2
struct _dentry *__tx_cache_get_dentry(struct dentry *dentry, enum access_mode mode)
{

	txobj_thread_list_node_t * list_node = NULL;
	struct _dentry *_dentry;
	struct _dentry *shadow;
	int task_count = atomic_read(&current->transaction->task_count);
	int should_sleep = 0;
	struct transaction *winner;

#ifdef CONFIG_TX_KSTM_PROF
	unsigned long long cycles, a;
	rdtscll(cycles);
#endif
#ifdef CONFIG_TX_KSTM_ASSERTIONS
	struct _dentry *checkpoint;
	BUG_ON(dentry == NULL);
#endif

	/* Protect the read with an rcu read lock */
	rcu_read_lock();
 	_dentry = dentry->d_contents;
	rcu_read_unlock();

	KSTM_BUG_ON(_dentry == NULL);
	KSTM_BUG_ON(shadow(_dentry));

	/* Next, make sure we don't already have the object */
	list_node = workset_has_object(&dentry->xobj);

	if(list_node) {
		if(task_count > 1)
			lock_my_dentry(dentry, 1);
	workset_hit:
		shadow = list_node->shadow_obj;
		if(list_node->rw < mode){
			struct _dentry *old_shadow;
			
			/* Upgrade the mode */
			if(task_count == 1)
				LOCK_XOBJ(&dentry->xobj);
			winner = 
				upgrade_xobj_mode(list_node->tx_obj, mode, &should_sleep);
			if(winner){
				if(!should_sleep)
					winner = NULL;
					
				if(task_count == 1)
					UNLOCK_XOBJ(&dentry->xobj);
				else
					unlock_my_dentry(dentry, 1);
				abort_self(winner, 0);
			} 
			list_node->rw = mode;

			if(task_count == 1)
				UNLOCK_XOBJ(&dentry->xobj);

			/* the object is read-shared and we must copy it */
			old_shadow = shadow;
			shadow = __shadow_copy_dentry(dentry, _dentry,
						      mode, task_count);
			
			if(unlikely(IS_ERR(shadow))){
				if(task_count > 1)
					unlock_my_dentry(dentry, 1);
				goto out;
			}

			list_node->rw = mode;
			list_node->shadow_obj = shadow;

			atomic_dec(&old_shadow->tx_readcount);
			if(dentry->d_contents == old_shadow)
				atomic_dec(&old_shadow->tx_refcount);
			else 
				free_tx_dentry(old_shadow);
		}  
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		goto out;
	}

	/* At this point, we definitely don't have the object.  Add
	 * it!
	 */
	if(task_count > 1){
		lock_my_dentry(dentry, 1);
		/* Recheck that another task didn't add the object */
		if((list_node = workset_has_object_locked(&dentry->xobj)))
			goto workset_hit;
	} else 
		LOCK_XOBJ(&dentry->xobj);
		
	list_node = tx_check_add_obj(&dentry->xobj, TYPE_DENTRY, mode, &should_sleep, &winner);

	if(unlikely(!list_node)){
		if(!should_sleep)
			winner = NULL;
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		else 
			UNLOCK_XOBJ(&dentry->xobj);
		abort_self(winner, 0);
	}

	if(task_count == 1)
		UNLOCK_XOBJ(&dentry->xobj);


	/* Go ahead and increment the refcount so the dentry doesn't get freed */
	tx_atomic_inc_nolog(&dentry->d_count);
	
	/* Allocate the shadow copy and update the local workset */

	if (mode == ACCESS_R) {
		/* Read access: share the committed copy */
		shadow = _dentry;

		/*
		 * We'll ignore this bug for now:
		 * if (atomic_read(&_dentry->tx_refcount) == 1)
		 *	OSA_MAGIC(OSA_BREAKSIM);
		 */

		atomic_inc(&_dentry->tx_refcount);
		atomic_inc(&_dentry->tx_readcount);
	} else {
		/* Write access: get our own private shadow copy */
		shadow = __shadow_copy_dentry(dentry, _dentry, 
					      mode, task_count);
		if(unlikely(IS_ERR(shadow)))
			goto out;
	}

#ifdef CONFIG_TX_KSTM_ASSERTIONS
	checkpoint = __checkpoint_dentry(_dentry);
	if(unlikely(checkpoint == NULL)){
		if(shadow != _dentry)
			__free_tx_dentry(shadow);
		shadow = ERR_PTR(-ETXABORT);
		goto out;
	}
#endif

	if(unlikely(__setup_list_node(list_node, dentry, shadow, 
#ifdef CONFIG_TX_KSTM_ASSERTIONS
				      checkpoint,
#endif				     
				      mode, &dentry->xobj)))
		shadow = ERR_PTR(-ETXABORT);

	if(task_count > 1)
		unlock_my_dentry(dentry, 1);
	
out:
#ifdef CONFIG_TX_KSTM_PROF
	rdtscll(a);
	shadowCopyCycles += (a - cycles);
#endif	
	
	return shadow;
}
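
__tx_cache_get_dentry() returns either a shadow copy or an ERR_PTR() value, so callers would normally go through thin wrappers that pick the access mode and check for an aborted transaction. The sketch below is illustrative only: the wrapper names mirror the tx_cache_get_file_ro()/tx_cache_get_inode_ro() calls in Code Example #1, ACCESS_RW is assumed as the writable counterpart of the ACCESS_R mode used above, and example_dentry_update() is a made-up caller.

/*
 * Hypothetical wrappers and caller for __tx_cache_get_dentry().  Only
 * ACCESS_R appears in the listing; ACCESS_RW is assumed as the writable
 * mode, and the wrapper/caller names are illustrative.
 */
static inline struct _dentry *tx_cache_get_dentry(struct dentry *dentry)
{
	return __tx_cache_get_dentry(dentry, ACCESS_RW);
}

static inline struct _dentry *tx_cache_get_dentry_ro(struct dentry *dentry)
{
	return __tx_cache_get_dentry(dentry, ACCESS_R);
}

static int example_dentry_update(struct dentry *dentry)
{
	struct _dentry *_dentry = tx_cache_get_dentry(dentry);

	if (IS_ERR(_dentry))
		return PTR_ERR(_dentry);	/* e.g. -ETXABORT */

	/*
	 * Modify the private shadow copy here; it only becomes visible
	 * through dentry->d_contents if the transaction commits.
	 */
	return 0;
}

Whatever the real entry points look like, the contract visible in the listing is that read-mode callers share dentry->d_contents (bumping tx_readcount/tx_refcount), while write-mode callers receive a private copy from __shadow_copy_dentry() that is published only at commit.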