/* Exemplo n.º 1 ("Example no. 1") — extraction artifact, not part of the C source
 * 0
 */
/*
 * do_ioctl - dispatch an ioctl to the file's operations table, gated by a
 * transactional whitelist.
 *
 * @filp: open file the ioctl targets
 * @cmd:  ioctl command number
 * @arg:  command-specific argument (typically a userspace pointer or value)
 *
 * Inside a live transaction, only a small set of commands known to be safe
 * (close-on-exec flags, nonblocking/async toggles, queue size, TCGETS) is
 * allowed through unconditionally; any other command is handled according
 * to the transaction's unsupported_operation_action policy: abort the
 * transaction, fail with -ENOTXSUPPORT, or (optionally with a warning)
 * execute it anyway.
 *
 * Returns the handler's result, -EINVAL if the handler reports
 * -ENOIOCTLCMD, -ENOTXSUPPORT when refused inside a transaction, or
 * -ENOTTY when the file has no ioctl handler at all.
 */
static long do_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int error = -ENOTTY;
	void *f;

	/* Whitelist check */
	if(live_transaction()){
		switch(cmd){
		case FIOCLEX:
		case FIONCLEX:
		case FIONBIO:
		case FIOASYNC:
		case FIOQSIZE:
		case TCGETS:
			break;
		default:
			if(current->transaction->unsupported_operation_action == UNSUPPORTED_ABORT){
				// Don't leap out of the stack
				printk(KERN_ERR "Aborting on unsupported ioctl in tx: %u\n", cmd);
				abort_self(NULL, 0);
			} else if(current->transaction->unsupported_operation_action == UNSUPPORTED_ERROR_CODE){
				printk(KERN_ERR "Warning: Stopped execution of unsupported ioctl in tx: %u\n", cmd);
				return -ENOTXSUPPORT;
			} else {
#ifdef CONFIG_TX_KSTM_WARNINGS
				printk(KERN_ERR "Warning: Executing unsupported ioctl in tx: %u\n", cmd);
#endif
			}
		}
	}

	if (!filp->f_op)
		goto out;

	/* Prefer the BKL-free unlocked_ioctl entry point when available. */
	if (filp->f_op->unlocked_ioctl) {
		error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
		if (error == -ENOIOCTLCMD)
			error = -EINVAL;
		goto out;
	} else if ((f = filp->f_op->ioctl)) {
		/* Legacy ->ioctl runs under the Big Kernel Lock.  The
		 * handler pointer is re-checked after taking the lock in
		 * case it vanished (e.g. module unload) between the
		 * unlocked test above and lock acquisition; if so, log
		 * loudly instead of calling a stale pointer. */
		lock_kernel();
		if (!filp->f_op->ioctl) {
			printk("%s: ioctl %p disappeared\n", __FUNCTION__, f);
			print_symbol("symbol: %s\n", (unsigned long)f);
			dump_stack();
		} else {
			error = filp->f_op->ioctl((file_get_dentry(filp)->d_inode),
						  filp, cmd, arg);
		}
		unlock_kernel();
	}

 out:
	return error;
}
/* Exemplo n.º 2 ("Example no. 2") — extraction artifact, not part of the C source
 * 0
 */
/*
 * __tx_cache_get_dentry - get the current transaction's shadow copy of a
 * dentry, adding it to the workset or upgrading its access mode as needed.
 *
 * @dentry: the stable dentry whose transactional view is requested
 * @mode:   requested access mode (ACCESS_R for shared read, otherwise a
 *          private writable copy is made)
 *
 * Returns the shadow _dentry on success, or an ERR_PTR on allocation
 * failure / forced abort.  May not return at all: on a lost conflict this
 * calls abort_self(), which aborts the current transaction.
 *
 * Locking: with a single task in the transaction (task_count == 1) only the
 * per-object lock (LOCK_XOBJ) is taken around workset bookkeeping; with
 * multiple tasks the per-dentry lock (lock_my_dentry) is held across the
 * whole lookup/upgrade instead.  The two regimes are mutually exclusive
 * throughout — every path below branches on task_count to release the
 * matching lock.
 */
struct _dentry * __tx_cache_get_dentry(struct dentry * dentry, enum access_mode mode){

	txobj_thread_list_node_t * list_node = NULL;
	struct _dentry *_dentry;
	struct _dentry *shadow;
	int task_count = atomic_read(&current->transaction->task_count);
	int should_sleep = 0;
	struct transaction *winner;

#ifdef CONFIG_TX_KSTM_PROF
	/* Cycle counters for profiling the shadow-copy path. */
	unsigned long long cycles, a;
	rdtscll(cycles);
#endif
#ifdef CONFIG_TX_KSTM_ASSERTIONS
 	struct _dentry * checkpoint;
	BUG_ON(dentry == NULL);
#endif

	/* Protect the read with an rcu read lock */
	rcu_read_lock();
 	_dentry = dentry->d_contents;
	rcu_read_unlock();

	KSTM_BUG_ON(_dentry == NULL);
	/* d_contents must be the stable copy, never itself a shadow. */
	KSTM_BUG_ON(shadow(_dentry));

	/* Next, make sure we don't already have the object */
	list_node = workset_has_object(&dentry->xobj);

	if(list_node) {
		if(task_count > 1)
			lock_my_dentry(dentry, 1);
	/* Also reached via goto from the add path below, when a racing
	 * sibling task added the object first (dentry lock already held
	 * in that case, since task_count > 1 there). */
	workset_hit:
		shadow = list_node->shadow_obj;
		if(list_node->rw < mode){
			struct _dentry *old_shadow;
			
			/* Upgrade the mode */
			if(task_count == 1)
				LOCK_XOBJ(&dentry->xobj);
			winner = 
				upgrade_xobj_mode(list_node->tx_obj, mode, &should_sleep);
			if(winner){
				/* We lost a conflict: drop whichever lock we
				 * hold, then abort ourselves (sleeping on the
				 * winner only if asked to). */
				if(!should_sleep)
					winner = NULL;
					
				if(task_count == 1)
					UNLOCK_XOBJ(&dentry->xobj);
				else
					unlock_my_dentry(dentry, 1);
				abort_self(winner, 0);
			} 
			list_node->rw = mode;

			if(task_count == 1)
				UNLOCK_XOBJ(&dentry->xobj);

			/* the object is read-shared and we must copy it */
			old_shadow = shadow;
			shadow = __shadow_copy_dentry(dentry, _dentry,
						      mode, task_count);
			
			if(unlikely(IS_ERR(shadow))){
				/* Propagate the ERR_PTR; caller handles it. */
				if(task_count > 1)
					unlock_my_dentry(dentry, 1);
				goto out;
			}

			list_node->rw = mode;
			list_node->shadow_obj = shadow;

			/* Retire our read-share of the old shadow.  If it is
			 * still the published d_contents only the refcount
			 * drops; otherwise we held the last use and free it. */
			atomic_dec(&old_shadow->tx_readcount);
			if(dentry->d_contents == old_shadow)
				atomic_dec(&old_shadow->tx_refcount);
			else 
				free_tx_dentry(old_shadow);
		}  
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		goto out;
	}

	/* At this point, we definitely don't have the object.  Add
	 * it!
	 */
	if(task_count > 1){
		lock_my_dentry(dentry, 1);
		/* Recheck that another task didn't add the object */
		if((list_node = workset_has_object_locked(&dentry->xobj)))
			goto workset_hit;
	} else 
		LOCK_XOBJ(&dentry->xobj);
		
	list_node = tx_check_add_obj(&dentry->xobj, TYPE_DENTRY, mode, &should_sleep, &winner);

	if(unlikely(!list_node)){
		/* Conflict while adding: release the lock we hold and abort
		 * (optionally sleeping on the winning transaction). */
		if(!should_sleep)
			winner = NULL;
		if(task_count > 1)
			unlock_my_dentry(dentry, 1);
		else 
			UNLOCK_XOBJ(&dentry->xobj);
		abort_self(winner, 0);
	}

	if(task_count == 1)
		UNLOCK_XOBJ(&dentry->xobj);


	/* Go ahead an increment the refcount so we don't get freed */
	tx_atomic_inc_nolog(&dentry->d_count);
	
	// Allocate the shadow copy and update the local workset

	if(mode == ACCESS_R){
		// Share it
		/* Readers share the stable copy directly; just account for
		 * the extra reference and reader. */
		shadow = _dentry;

		//we'll ignore this bug for now:
		//if(atomic_read(&_dentry->tx_refcount) == 1)
		//	OSA_MAGIC(OSA_BREAKSIM);

		atomic_inc(&_dentry->tx_refcount);
		atomic_inc(&_dentry->tx_readcount);
	} else {
		// Get our own
		shadow = __shadow_copy_dentry(dentry, _dentry, 
					      mode, task_count);
		if(unlikely(IS_ERR(shadow)))
			goto out;
	}

#ifdef CONFIG_TX_KSTM_ASSERTIONS
	/* Debug builds keep a checkpoint of the original for validation;
	 * failure to checkpoint aborts the acquisition. */
	checkpoint = __checkpoint_dentry(_dentry);
	if(unlikely(checkpoint == NULL)){
		if(shadow != _dentry)
			__free_tx_dentry(shadow);
		shadow = ERR_PTR(-ETXABORT);
		goto out;
	}
#endif

	if(unlikely(__setup_list_node(list_node, dentry, shadow, 
#ifdef CONFIG_TX_KSTM_ASSERTIONS
				      checkpoint,
#endif				     
				      mode, &dentry->xobj)))
		shadow = ERR_PTR(-ETXABORT);

	if(task_count > 1)
		unlock_my_dentry(dentry, 1);
	
out:
#ifdef CONFIG_TX_KSTM_PROF
	rdtscll(a);
	shadowCopyCycles += (a - cycles);
#endif	
	
	return shadow;
}