Example no. 1
int abort_dentry(struct txobj_thread_list_node * xnode){
				
	struct dentry * orig = (struct dentry *)xnode->orig_obj;
	struct _dentry * shadow = (struct _dentry *)xnode->shadow_obj;

	if(xnode->rw == ACCESS_R){
		atomic_dec(&shadow->tx_readcount);
		atomic_dec(&shadow->tx_refcount);
	} else {
		if((shadow->d_flags & DCACHE_SPECULATIVE_CREATE)){
			struct _dentry *old_dentry = orig->d_contents;

			/* Free up the speculatively created inode reference to force deletion */
			tx_atomic_dec(&shadow->d_inode->i_count);
			tx_cache_get_inode(shadow->d_inode)->i_nlink--;
			free_tx_dentry(shadow);

			/* Use this as a signal to release_dentry */
			old_dentry->d_flags |= DCACHE_SPECULATIVE_CREATE;
			
			/* We also need the dentry to be negative */
			KSTM_BUG_ON(old_dentry->d_inode != NULL);

			/* Also a hack to avoid messing up the parent
			 * dentry's refcount when we put this dentry,
			 * since the refcount increase is rolled back
			 * elsewhere.
			 */
			tx_atomic_inc(&old_dentry->d_parent->d_count);

		} else
			free_tx_dentry(shadow);
	}

	return 0;
}
Example no. 2
void fastcall __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	KSTM_BUG_ON(need_files_checkpoint());
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
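
For context, here is a minimal userspace sketch (my own illustration, not the kernel's fdtable code; every name in it is invented) of the bookkeeping __put_unused_fd performs: the freed descriptor is cleared from the open-descriptor bitmap, and if it is lower than the current allocation hint, the hint moves down so the next allocation starts scanning from the lowest known hole.

#include <stdbool.h>
#include <stdio.h>

#define MAX_FDS 64

/* Stand-ins for struct files_struct / struct fdtable; not the real kernel types. */
struct simple_files {
	bool open[MAX_FDS];	/* plays the role of fdt->open_fds */
	unsigned int next_fd;	/* lowest fd that might be free */
};

static void put_unused_fd_sketch(struct simple_files *files, unsigned int fd)
{
	files->open[fd] = false;	/* analogous to __FD_CLR(fd, fdt->open_fds) */
	if (fd < files->next_fd)
		files->next_fd = fd;	/* keep the hint at the lowest free slot */
}

int main(void)
{
	struct simple_files f = { .next_fd = 5 };

	f.open[2] = true;
	put_unused_fd_sketch(&f, 2);
	printf("next_fd = %u\n", f.next_fd);	/* prints 2 */
	return 0;
}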
Example no. 3
/* Do the cleanup/freeing work */
int release_dentry(struct txobj_thread_list_node * xnode, int early){

	struct dentry *dentry = (struct dentry*) xnode->orig_obj;

	// Account for the dropped inode reference here
	if(unlikely(dentry->d_contents->d_flags & DCACHE_SPECULATIVE_CREATE)){
		KSTM_BUG_ON(atomic_read(&dentry->d_count) > 2);
		tx_atomic_add_unless(&dentry->d_count, -1, 1);
	}
				
#ifdef CONFIG_TX_KSTM_ASSERTIONS
	free_tx_dentry((struct _dentry *)xnode->checkpoint_obj);
#endif
	
	// release the transaction's reference
	dput((struct dentry *)xnode->orig_obj);

	return 0;
}
Example no. 4
static inline struct _dentry *__shadow_copy_dentry(struct dentry *dentry,
						   struct _dentry *_dentry,
						   enum access_mode mode,
						   int task_count){
	struct _dentry *shadow_dentry;

	KSTM_BUG_ON(mode == ACCESS_R);

	//alloc a shadow dentry
	shadow_dentry = alloc_tx_dentry();
	if(!shadow_dentry)
		BUG(); /* Should probably call abort_self */

	if(task_count == 1)
		lock_my_dentry(dentry, 0);

	/* Go ahead and increment the refcount so we don't get freed.
	 * If we try to add something that is being actively deleted
	 * out from under us, just abort
	 */
	memcpy(shadow_dentry, _dentry, sizeof(struct _dentry));
	atomic_set(&shadow_dentry->tx_refcount, 1);

	if(task_count == 1)
		unlock_my_dentry(dentry, 0);

	shadow_dentry->shadow = _dentry;
	shadow_dentry->rw = mode;

	/* Fix up the d_name.name pointer */
	if(_dentry->d_iname == _dentry->d_name.name)
		shadow_dentry->d_name.name = shadow_dentry->d_iname;

	/* Finish init'ing the shadow copy */
	//list_init_tx(&shadow_dentry->d_subdirs, mode);
	//list_init_tx(&shadow_dentry->d_child, mode);
	//list_init_tx(&shadow_dentry->d_alias, mode);
	//hlist_init_tx(&shadow_dentry->d_hash, mode);

	OSA_PROTECT_ADDR(dentry, sizeof(struct _dentry));

	return shadow_dentry;
}
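
The memcpy()-then-fix-up step above is easy to get wrong, so here is a small self-contained sketch (my own illustration with invented names, not part of the original source) of the d_name.name adjustment: after copying the structure, a pointer that referred to the source's embedded d_iname buffer must be redirected to the copy's own embedded buffer, otherwise the shadow would keep reading the original's storage.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct _dentry's inline-name layout. */
struct name_obj {
	char inline_buf[16];	/* embedded storage for short names (like d_iname) */
	const char *name;	/* points into inline_buf for short names (like d_name.name) */
};

static void copy_obj(struct name_obj *dst, const struct name_obj *src)
{
	memcpy(dst, src, sizeof(*dst));
	/* Without this fix-up, dst->name would still point at src->inline_buf. */
	if (src->name == src->inline_buf)
		dst->name = dst->inline_buf;
}

int main(void)
{
	struct name_obj a = { .inline_buf = "hello" };
	struct name_obj b;

	a.name = a.inline_buf;
	copy_obj(&b, &a);
	printf("%s (points into the copy: %d)\n", b.name, b.name == b.inline_buf);
	return 0;
}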
Example no. 5
struct _dentry * __tx_cache_get_dentry(struct dentry * dentry, enum access_mode mode){

	txobj_thread_list_node_t * list_node = NULL;
	struct _dentry *_dentry;
	struct _dentry *shadow;
	int task_count = atomic_read(&current->transaction->task_count);
	int should_sleep = 0;
	struct transaction *winner;

#ifdef CONFIG_TX_KSTM_PROF
	unsigned long long cycles, a;
	rdtscll(cycles);
#endif
#ifdef CONFIG_TX_KSTM_ASSERTIONS
 	struct _dentry * checkpoint;
	BUG_ON(dentry == NULL);
#endif

	/* Protect the read with an rcu read lock */
	rcu_read_lock();
 	_dentry = dentry->d_contents;
	rcu_read_unlock();

	KSTM_BUG_ON(_dentry == NULL);
	KSTM_BUG_ON(shadow(_dentry));

	/* Next, make sure we don't already have the object */
	list_node = workset_has_object(&dentry->xobj);

	if(list_node) {
		if(task_count > 1)
			lock_my_dentry(dentry, 1);
	workset_hit:
		shadow = list_node->shadow_obj;
		if(list_node->rw < mode){
			struct _dentry *old_shadow;
			
			/* Upgrade the mode */
			if(task_count == 1)
				LOCK_XOBJ(&dentry->xobj);
			winner = upgrade_xobj_mode(list_node->tx_obj,
						   mode, &should_sleep);
			if(winner){
				if(!should_sleep)
					winner = NULL;
					
				if(task_count == 1)
					UNLOCK_XOBJ(&dentry->xobj);
				else
					unlock_my_dentry(dentry, 1);
				abort_self(winner, 0);
			} 
			list_node->rw = mode;

			if(task_count == 1)
				UNLOCK_XOBJ(&dentry->xobj);

			/* the object is read-shared and we must copy it */
			old_shadow = shadow;
			shadow = __shadow_copy_dentry(dentry, _dentry,
						      mode, task_count);
			
			if(unlikely(IS_ERR(shadow))){
				if(task_count > 1)
					unlock_my_dentry(dentry, 1);
				goto out;
			}

			list_node->rw = mode;
			list_node->shadow_obj = shadow;

			atomic_dec(&old_shadow->tx_readcount);
			if(dentry->d_contents == old_shadow)
				atomic_dec(&old_shadow->tx_refcount);
			else 
				free_tx_dentry(old_shadow);
		}  
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		goto out;
	}

	/* At this point, we definitely don't have the object.  Add
	 * it!
	 */
	if(task_count > 1){
		lock_my_dentry(dentry, 1);
		/* Recheck that another task didn't add the object */
		if((list_node = workset_has_object_locked(&dentry->xobj)))
			goto workset_hit;
	} else 
		LOCK_XOBJ(&dentry->xobj);
		
	list_node = tx_check_add_obj(&dentry->xobj, TYPE_DENTRY, mode, &should_sleep, &winner);

	if(unlikely(!list_node)){
		if(!should_sleep)
			winner = NULL;
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		else 
			UNLOCK_XOBJ(&dentry->xobj);
		abort_self(winner, 0);
	}

	if(task_count == 1)
		UNLOCK_XOBJ(&dentry->xobj);


	/* Go ahead and increment the refcount so we don't get freed */
	tx_atomic_inc_nolog(&dentry->d_count);
	
	// Allocate the shadow copy and update the local workset

	if(mode == ACCESS_R){
		// Share it
		shadow = _dentry;

		//we'll ignore this bug for now:
		//if(atomic_read(&_dentry->tx_refcount) == 1)
		//	OSA_MAGIC(OSA_BREAKSIM);

		atomic_inc(&_dentry->tx_refcount);
		atomic_inc(&_dentry->tx_readcount);
	} else {
		// Get our own
		shadow = __shadow_copy_dentry(dentry, _dentry, 
					      mode, task_count);
		if(unlikely(IS_ERR(shadow)))
			goto out;
	}

#ifdef CONFIG_TX_KSTM_ASSERTIONS
	checkpoint = __checkpoint_dentry(_dentry);
	if(unlikely(checkpoint == NULL)){
		if(shadow != _dentry)
			__free_tx_dentry(shadow);
		shadow = ERR_PTR(-ETXABORT);
		goto out;
	}
#endif

	if(unlikely(__setup_list_node(list_node, dentry, shadow, 
#ifdef CONFIG_TX_KSTM_ASSERTIONS
				      checkpoint,
#endif				     
				      mode, &dentry->xobj)))
		shadow = ERR_PTR(-ETXABORT);

	if(task_count > 1)
		unlock_my_dentry(dentry, 1);
	
out:
#ifdef CONFIG_TX_KSTM_PROF
	rdtscll(a);
	shadowCopyCycles += (a - cycles);
#endif	
	
	return shadow;
}
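
Stripped of the locking, contention handling, and workset bookkeeping, the shadow-selection policy above reduces to: readers share the committed copy and bump its transactional counts, while writers get a private copy whose lifetime commit/abort manages. A simplified, non-kernel sketch of just that decision (my own illustration; the types and helper names are invented):

#include <stdlib.h>
#include <string.h>

enum access_mode { ACCESS_R, ACCESS_RW };

/* Toy stand-in for a versioned object such as struct _dentry. */
struct versioned {
	int data;
	int tx_refcount;
	int tx_readcount;
};

static struct versioned *get_shadow(struct versioned *committed,
				    enum access_mode mode)
{
	if (mode == ACCESS_R) {
		/* Read access: share the committed copy, as in the ACCESS_R branch above. */
		committed->tx_refcount++;
		committed->tx_readcount++;
		return committed;
	}

	/* Write access: take a private copy, as __shadow_copy_dentry does. */
	struct versioned *shadow = malloc(sizeof(*shadow));
	if (!shadow)
		return NULL;	/* the kernel code BUG()s here instead */
	memcpy(shadow, committed, sizeof(*shadow));
	shadow->tx_refcount = 1;
	shadow->tx_readcount = 0;
	return shadow;
}

int main(void)
{
	struct versioned committed = { .data = 42, .tx_refcount = 1 };
	struct versioned *rw = get_shadow(&committed, ACCESS_RW);

	rw->data = 43;		/* committed.data stays 42 until a commit writes it back */
	free(rw);
	return 0;
}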
Example no. 6
int validate_dentry(struct txobj_thread_list_node * xnode){
				
	struct _dentry * orig = ((struct dentry *)xnode->orig_obj)->d_contents;
	struct _dentry *checkpoint = xnode->checkpoint_obj;
	struct _dentry *shadow = xnode->shadow_obj;

	// Don't bother validating this
	//TX_VALIDATE_ATOMIC(orig, checkpoint, d_count);

	TX_VALIDATE(orig, checkpoint, d_flags);

	// Make sure this hasn't changed.  We don't update this,
	// though, as we are going to have the shadow copy point to
	// the shadow inode
	TX_VALIDATE(orig, checkpoint, d_inode);


	// Ignore d_hash - only used in non-speculative copy
	//hlist_validate_tx(&orig->d_hash, &checkpoint->d_hash);

	TX_VALIDATE(orig, checkpoint, d_parent);

	TX_VALIDATE(orig, checkpoint, d_name.hash);
	TX_VALIDATE(orig, checkpoint, d_name.len);
	TX_VALIDATE(orig, checkpoint, d_name.name);

	// Check the child and alias lists
	//list_validate_tx(&orig->d_subdirs, &checkpoint->d_subdirs);
	//list_validate_tx(&orig->d_child, &checkpoint->d_child);
	//list_validate_tx(&orig->d_alias, &checkpoint->d_alias);

	TX_VALIDATE(orig, checkpoint, d_time);
	TX_VALIDATE(orig, checkpoint, d_op);
#ifdef CONFIG_PROFILING
	TX_VALIDATE(orig, checkpoint, d_cookie);
#endif

	if(strncmp(orig->d_iname, checkpoint->d_iname, DNAME_INLINE_LEN_MIN) != 0){
		printk(KERN_ERR "Inconsistent value for %p\n", &orig->d_iname);
		BUG();
	}

	/* Also validate the shadow */
	if(xnode->rw == ACCESS_R){

		TX_VALIDATE(orig, shadow, d_flags);

		KSTM_BUG_ON(orig->d_inode != shadow->d_inode);
		//KSTM_BUG_ON(orig->d_inode != shadow(shadow->d_inode));

		// Ignore d_hash - only used in non-speculative copy
		//hlist_validate_tx_ro(&orig->d_hash, &shadow->d_hash);

		TX_VALIDATE(orig, shadow, d_parent);
		TX_VALIDATE(orig, shadow, d_name.hash);
		TX_VALIDATE(orig, shadow, d_name.len);

		if(shadow->d_name.name == shadow->d_iname)
			KSTM_BUG_ON(orig->d_name.name != orig->d_iname);
		else
			TX_VALIDATE(orig, shadow, d_name.name);

		// Check the child and alias lists
		//list_validate_tx_ro(&orig->d_subdirs, &shadow->d_subdirs);
		//list_validate_tx_ro(&orig->d_child, &shadow->d_child);
		//list_validate_tx_ro(&orig->d_alias, &shadow->d_alias);

		TX_VALIDATE(orig, shadow, d_time);
		TX_VALIDATE(orig, shadow, d_op);
#ifdef CONFIG_PROFILING
		TX_VALIDATE(orig, shadow, d_cookie);
#endif

		if(strncmp(orig->d_iname, shadow->d_iname, DNAME_INLINE_LEN_MIN) != 0){
			printk(KERN_ERR "Inconsistent value for %p\n", &orig->d_iname);
			BUG();
		}
	}

	return 0;
}
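
For reference, here is a guess at the shape of the TX_VALIDATE helper used throughout this function, inferred purely from the open-coded strncmp check above (which prints the same message and calls BUG()); the real macro in the KSTM source may well differ.

/* Hypothetical reconstruction, not the actual definition from the source tree. */
#define TX_VALIDATE(a, b, field)					\
	do {								\
		if ((a)->field != (b)->field) {				\
			printk(KERN_ERR "Inconsistent value for %p\n",	\
			       &(a)->field);				\
			BUG();						\
		}							\
	} while (0)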