Example #1
0
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_dentry->d_inode; 
	struct file_lock file_lock, *fl;
	struct file_lock **before;

repeat:
	before = &inode->i_flock;
	while ((fl = *before) != NULL) {
		if ((fl->fl_flags & FL_FLOCK) && fl->fl_file == filp) {
			int (*lock)(struct file *, int, struct file_lock *);
			lock = NULL;
			if (filp->f_op)
				lock = filp->f_op->lock;
			if (lock) {
				file_lock = *fl;
				file_lock.fl_type = F_UNLCK;
			}
			locks_delete_lock(before, 0);
			if (lock) {
				lock(filp, F_SETLK, &file_lock);
				/* List may have changed: */
				goto repeat;
			}
			continue;
		}
		before = &fl->fl_next;
	}
}
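A user-space sketch (not part of the kernel source) of the path that ends in locks_remove_flock(): a BSD flock() lock disappears on the last close of the open file. The file name "lockfile" is just an illustrative scratch path.

/* User-space sketch: the final close() drops the FL_FLOCK lock,
 * which is the cleanup locks_remove_flock() performs. */
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("lockfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	if (flock(fd, LOCK_EX) < 0)	/* take a BSD-style lock */
		return 1;
	/* ... critical section ... */
	close(fd);			/* last close: the kernel removes the lock */
	return 0;
}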
Example #2
0
/*
 * This function is called when the file is being removed
 * from the task's fd array.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct inode * inode = filp->f_dentry->d_inode;
	struct file_lock file_lock, *fl;
	struct file_lock **before;

	/*
	 * For POSIX locks we free all locks on this file for the given task.
	 */
repeat:
	before = &inode->i_flock;
	while ((fl = *before) != NULL) {
		if ((fl->fl_flags & FL_POSIX) && fl->fl_owner == owner) {
			int (*lock)(struct file *, int, struct file_lock *);
			lock = filp->f_op->lock;
			if (lock) {
				file_lock = *fl;
				file_lock.fl_type = F_UNLCK;
			}
			locks_delete_lock(before, 0);
			if (lock) {
				lock(filp, F_SETLK, &file_lock);
				/* List may have changed: */
				goto repeat;
			}
			continue;
		}
		before = &fl->fl_next;
	}
}
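For comparison, a hedged user-space sketch of the POSIX counterpart: closing any descriptor that refers to the file releases every fcntl() lock the process holds on it, which is the cleanup locks_remove_posix() performs. "lockfile" is again an invented scratch path.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "to end of file" */
	};
	int fd  = open("lockfile", O_CREAT | O_RDWR, 0644);
	int fd2 = dup(fd);

	if (fd < 0 || fd2 < 0)
		return 1;
	if (fcntl(fd, F_SETLK, &fl) < 0)	/* lock taken through fd */
		return 1;
	close(fd2);	/* closing ANY descriptor for the file releases every
			 * POSIX lock this process holds on it */
	return 0;
}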
Example #3
0
static void flock_remove_locks(struct file_lock **before, struct file *filp)
{
	struct file_lock *fl;

	while ((fl = *before) != NULL) {
		if ((fl->fl_file == filp) && (filp->f_count == 1))
			locks_delete_lock(before, 0);
		else
			before = &fl->fl_next;
	}

	return;
}
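The pointer-to-pointer cursor that all of these list walkers rely on can be shown in isolation. A minimal stand-alone sketch; struct node and remove_matching() are invented for the illustration, not kernel types.

#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink and free every node whose key matches, using the same
 * pointer-to-pointer cursor that flock_remove_locks() walks with. */
static void remove_matching(struct node **before, int key)
{
	struct node *n;

	while ((n = *before) != NULL) {
		if (n->key == key) {
			*before = n->next;	/* unlink; no back-pointer needed */
			free(n);
		} else {
			before = &n->next;	/* advance the cursor */
		}
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;

	for (int i = 0; i < 6; i++) {
		struct node *n = malloc(sizeof(*n));
		n->key = i % 2;			/* keys alternate 0,1,0,1,... */
		n->next = NULL;
		*tail = n;
		tail = &n->next;
	}
	remove_matching(&head, 0);	/* drops the three key==0 nodes */
	return 0;
}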
Example #4
0
static void posix_remove_locks(struct file_lock **before, struct task_struct *task)
{
	struct file_lock *fl;

	while ((fl = *before) != NULL) {
		if (fl->fl_owner == task)
			locks_delete_lock(before, 0);
		else
			before = &fl->fl_next;
	}

	return;
}
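A speculative user-space illustration of the per-owner cleanup shown above: POSIX locks belong to the owning process, so they vanish when that process exits and another process can then claim the region. The file name and layout are invented for the sketch.

#include <fcntl.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd = open("lockfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	if (fork() == 0) {
		fcntl(fd, F_SETLK, &fl);	/* child becomes the lock owner */
		_exit(0);			/* exiting releases the child's locks */
	}
	wait(NULL);
	/* The child's lock is gone, so the parent can now take it. */
	return fcntl(fd, F_SETLK, &fl) < 0;
}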
Example #5
0
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error;
	int added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl  = locks_empty_lock();
	new_fl2 = locks_empty_lock();
	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;
 
	if (caller->fl_type != F_UNLCK) {
  repeat:
		error = -EBUSY;
		if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_FLOCK))
			goto out;

		while (fl != NULL) {
			if (!posix_locks_conflict(caller, fl)) {
				fl = fl->fl_next;
				continue;
			}
			error = -EAGAIN;
			if (!wait)
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
				goto out;
			error = -ERESTARTSYS;
			if (current->signal & ~current->blocked)
				goto out;
			locks_insert_block(fl, caller);
			interruptible_sleep_on(&caller->fl_wait);
			locks_delete_block(fl, caller);
			goto repeat;
		}
	}

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 * 
	 * Find the first old lock with the same owner as the new lock.
	 */
	
	before = &filp->f_inode->i_flock;

	error = -EBUSY;
	if ((*before != NULL) && ((*before)->fl_flags & FL_FLOCK))
		goto out;

	/* First skip locks owned by other processes.
	 */
	while ((fl = *before) && (caller->fl_owner != fl->fl_owner)) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.
	 */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (caller->fl_type == fl->fl_type) {
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < caller->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > caller->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= caller->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl, 0);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (caller->fl_type == F_UNLCK)
			goto out;
		locks_init_lock(new_fl, caller);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock (in this
			 * case, even F_UNLCK may fail!).
			 */
			left = locks_init_lock(new_fl2, right);
			locks_insert_lock(before, left);
			new_fl2 = NULL;
		}
		right->fl_start = caller->fl_end + 1;
		locks_wake_up_blocks(right, 0);
	}
	if (left) {
		left->fl_end = caller->fl_start - 1;
		locks_wake_up_blocks(left, 0);
	}
out:
	/*
	 * Free any unused locks.  (They haven't
	 * ever been used, so we use kfree().)
	 */
	if (new_fl)
		kfree(new_fl);
	if (new_fl2)
		kfree(new_fl2);
	return error;
}
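The reason posix_lock_file() pre-allocates two file_lock structures is the split case, which is easy to provoke from user space: unlocking the middle of a locked region leaves two locks behind. A small sketch, assuming a scratch file named "lockfile" and an illustrative helper set_lock().

#include <fcntl.h>
#include <unistd.h>

/* Set or clear a POSIX lock on bytes [start, start+len-1]. */
static int set_lock(int fd, short type, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = type,
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};
	return fcntl(fd, F_SETLK, &fl);
}

int main(void)
{
	int fd = open("lockfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	set_lock(fd, F_WRLCK, 0, 100);	/* one lock covering bytes 0..99 */
	set_lock(fd, F_UNLCK, 40, 20);	/* splits it into 0..39 and 60..99 */
	close(fd);
	return 0;
}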
Example #6
0
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks at
 * the head of the list, but that's secret knowledge known only to the next
 * two functions.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	int error;
	int change;
	int unlock = (caller->fl_type == F_UNLCK);

	/*
	 * If we need a new lock, get it in advance to avoid races.
	 */
	if (!unlock) {
		error = -ENOLCK;
		new_fl = locks_alloc_lock(caller);
		if (!new_fl)
			goto out;
	}

	error = 0;
search:
	change = 0;

	before = &filp->f_inode->i_flock;

	if ((fl = *before) && (fl->fl_flags & FL_POSIX)) {
		error = -EBUSY;
		goto out;
	}

	while ((fl = *before) != NULL) {
		if (caller->fl_file == fl->fl_file) {
			if (caller->fl_type == fl->fl_type)
				goto out;
			change = 1;
			break;
		}
		before = &fl->fl_next;
	}
	/* change means that we are changing the type of an existing lock, or
	 * else unlocking it.
	 */
	if (change) {
		/* N.B. What if the wait argument is false? */
		locks_delete_lock(before, !unlock);
		/*
		 * If we waited, another lock may have been added ...
		 */
		if (!unlock)
			goto search;
	}
	if (unlock)
		goto out;

repeat:
	/* Check signals each time we start */
	error = -ERESTARTSYS;
	if (current->signal & ~current->blocked)
		goto out;
	error = -EBUSY;
	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX))
		goto out;

	while (fl != NULL) {
		if (!flock_locks_conflict(new_fl, fl)) {
			fl = fl->fl_next;
			continue;
		}
		error = -EAGAIN;
		if (!wait)
			goto out;
		locks_insert_block(fl, new_fl);
		interruptible_sleep_on(&new_fl->fl_wait);
		locks_delete_block(fl, new_fl);
		goto repeat;
	}
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	new_fl = NULL;
	error = 0;

out:
	if (new_fl)
		locks_free_lock(new_fl);
	return (error);
}
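Seen from user space, the "change" path above is why converting an flock() lock is not atomic: the old lock is deleted before the new one is acquired, so a competitor can slip in between. A hedged sketch using an illustrative scratch file:

#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("lockfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	flock(fd, LOCK_SH);	/* shared lock */
	/* The upgrade below is not atomic: the kernel deletes the old lock
	 * before acquiring the new one, so another process may grab an
	 * exclusive lock in the window. */
	flock(fd, LOCK_EX);
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}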
Example #7
0
static int posix_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int added = 0;

repeat:
	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_FLOCK))
		return (-EBUSY);

	if (caller->fl_type != F_UNLCK) {
		while (fl != NULL) {
			if (posix_locks_conflict(caller, fl)) {
				if (!wait)
					return (-EAGAIN);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
					return (-EDEADLK);
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return (-ERESTARTSYS);
				goto repeat;
			}
			fl = fl->fl_next;
		}
	}
	/*
	 * Find the first old lock with the same owner as the new lock.
	 */
	
	before = &filp->f_inode->i_flock;

	/* First skip FLOCK locks and locks owned by other processes.
	 */
	while ((fl = *before) && (caller->fl_owner != fl->fl_owner)) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.
	 */
	while ((fl = *before) && (caller->fl_owner == fl->fl_owner)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (caller->fl_type == fl->fl_type) {
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < caller->fl_start)
				goto next_lock;
			if (fl->fl_start > caller->fl_end)
				break;
			if (caller->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < caller->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > caller->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= caller->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before, 0);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				wake_up(&fl->fl_wait);
				fl->fl_start = caller->fl_start;
				fl->fl_end = caller->fl_end;
				fl->fl_type = caller->fl_type;
				caller = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	if (!added) {
		if (caller->fl_type == F_UNLCK)
			return (0);
		if ((new_fl = locks_alloc_lock(caller)) == NULL)
			return (-ENOLCK);
		locks_insert_lock(before, new_fl);
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces, so we
			 * have to allocate one more lock (in this case, even
			 * F_UNLCK may fail!).
			 */
			if ((left = locks_alloc_lock(right)) == NULL) {
				if (!added)
					locks_delete_lock(before, 0);
				return (-ENOLCK);
			}
			locks_insert_lock(before, left);
		}
		right->fl_start = caller->fl_end + 1;
		wake_up(&right->fl_wait);
	}
	if (left) {
		left->fl_end = caller->fl_start - 1;
		wake_up(&left->fl_wait);
	}
	return (0);
}
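The same-type coalescing arithmetic in the loop above (merge when the regions overlap or are adjacent) can be lifted out into a tiny stand-alone sketch. struct range and try_merge() are invented names for the illustration; they only mirror the adjacency test and the widening done on fl and caller.

struct range {
	long start, end;		/* inclusive byte range */
};

/* Widen *a to cover *b and return 1 if the two ranges overlap or touch. */
static int try_merge(struct range *a, const struct range *b)
{
	if (b->end < a->start - 1 || b->start > a->end + 1)
		return 0;		/* disjoint and not even adjacent */
	if (b->start < a->start)
		a->start = b->start;
	if (b->end > a->end)
		a->end = b->end;
	return 1;
}

int main(void)
{
	struct range a = { 0, 49 }, b = { 50, 99 };	/* adjacent ranges */

	return !try_merge(&a, &b);	/* a becomes [0, 99]; exit status 0 */
}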
Example #8
0
/* Try to create a FLOCK lock on filp. We always insert new locks at
 * the head of the list.
 */
static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl;
	struct file_lock **before;
	int change = 0;

	before = &filp->f_inode->i_flock;

	if ((fl = *before) && (fl->fl_flags & F_POSIX))
		return (-EBUSY);

	while ((fl = *before) != NULL) {
		if (caller->fl_file == fl->fl_file) {
			if (caller->fl_type == fl->fl_type)
				return (0);
			change = 1;
			break;
		}
		before = &fl->fl_next;
	}
	/* change means that we are changing the type of an existing lock, or
	 * else unlocking it.
	 */
	if (change)
		locks_delete_lock(before, caller->fl_type != F_UNLCK);
	if (caller->fl_type == F_UNLCK)
		return (0);
	if ((new_fl = locks_alloc_lock(caller)) == NULL)
		return (-ENOLCK);
repeat:
	if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & F_POSIX)) {
		locks_free_lock(new_fl);
		return (-EBUSY);
	}

	while (fl != NULL) {
		if (flock_locks_conflict(new_fl, fl)) {
			if (!wait) {
				locks_free_lock(new_fl);
				return (-EAGAIN);
			}
			if (current->signal & ~current->blocked) {
				/* Note: new_fl is not in any queue at this
				 * point, so we must use locks_free_lock()
				 * instead of locks_delete_lock()
				 * 	Dmitry Gorodchanin 09/02/96.
				 */
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}
			locks_insert_block(fl, new_fl);
			interruptible_sleep_on(&new_fl->fl_wait);
			wake_up(&new_fl->fl_wait);
			if (current->signal & ~current->blocked) {
				/* If we are here, than we were awakened
				 * by a signal, so new_fl is still in the
				 * block queue of fl. We need to remove 
				 * new_fl and then free it.
				 * 	Dmitry Gorodchanin 09/02/96.
				 */
				locks_delete_block(fl, new_fl);
				locks_free_lock(new_fl);
				return (-ERESTARTSYS);
			}
			goto repeat;
		}
		fl = fl->fl_next;
	}
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	return (0);
}
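Finally, a user-space sketch of the blocking versus non-blocking behaviour handled above: with LOCK_NB a busy lock fails immediately with EWOULDBLOCK instead of sleeping, while a blocking call interrupted by a caught signal typically returns EINTR (the -ERESTARTSYS exits in the kernel code). "lockfile" is an illustrative path.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("lockfile", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	if (flock(fd, LOCK_EX | LOCK_NB) < 0 && errno == EWOULDBLOCK) {
		/* Someone else holds the lock; a plain LOCK_EX call would
		 * have slept here, and a caught signal would end the sleep
		 * with EINTR. */
		printf("lock is busy\n");
	} else {
		flock(fd, LOCK_UN);
	}
	close(fd);
	return 0;
}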