Example #1
int
sys_semop(struct lwp *l, const struct sys_semop_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) semid;
		syscallarg(struct sembuf *) sops;
		syscallarg(size_t) nsops;
	} */
	struct proc *p = l->l_proc;
	int semid = SCARG(uap, semid), seq;
	size_t nsops = SCARG(uap, nsops);
	struct sembuf small_sops[SMALL_SOPS];
	struct sembuf *sops;
	struct semid_ds *semaptr;
	struct sembuf *sopptr = NULL;
	struct __sem *semptr = NULL;
	struct sem_undo *suptr = NULL;
	kauth_cred_t cred = l->l_cred;
	int i, error;
	int do_wakeup, do_undos;

	SEM_PRINTF(("call to semop(%d, %p, %zd)\n", semid, SCARG(uap,sops), nsops));

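	/*
	 * Mark the process as a SysV semaphore user so that exit-time
	 * cleanup (semexit() and its SEM_UNDO processing) knows to run.
	 */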
	if (__predict_false((p->p_flag & PK_SYSVSEM) == 0)) {
		mutex_enter(p->p_lock);
		p->p_flag |= PK_SYSVSEM;
		mutex_exit(p->p_lock);
	}

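	/*
	 * Re-entered (via "goto restart") when the semaphore array was
	 * reallocated while we slept; the operation vector is copied in
	 * again and the lookup is redone against the new array.
	 */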
restart:
	if (nsops <= SMALL_SOPS) {
		sops = small_sops;
	} else if (nsops <= seminfo.semopm) {
		sops = kmem_alloc(nsops * sizeof(*sops), KM_SLEEP);
	} else {
		SEM_PRINTF(("too many sops (max=%d, nsops=%zd)\n",
		    seminfo.semopm, nsops));
		return (E2BIG);
	}

	error = copyin(SCARG(uap, sops), sops, nsops * sizeof(sops[0]));
	if (error) {
		SEM_PRINTF(("error = %d from copyin(%p, %p, %zd)\n", error,
		    SCARG(uap, sops), &sops, nsops * sizeof(sops[0])));
		if (sops != small_sops)
			kmem_free(sops, nsops * sizeof(*sops));
		return error;
	}

	mutex_enter(&semlock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(sem_realloc_state))
		cv_wait(&sem_realloc_cv, &semlock);

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */
	if (semid < 0 || semid >= seminfo.semmni) {
		error = EINVAL;
		goto out;
	}

	semaptr = &sema[semid];
	seq = IPCID_TO_SEQ(SCARG(uap, semid));
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != seq) {
		error = EINVAL;
		goto out;
	}

	if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
		SEM_PRINTF(("error = %d from ipaccess\n", error));
		goto out;
	}

	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= semaptr->sem_nsems) {
			error = EFBIG;
			goto out;
		}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];
			semptr = &semaptr->_sem_base[sopptr->sem_num];

			SEM_PRINTF(("semop:  semaptr=%p, sem_base=%p, "
			    "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->_sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ?
			    "nowait" : "wait"));

			if (sopptr->sem_op < 0) {
				if ((int)(semptr->semval +
				    sopptr->sem_op) < 0) {
					SEM_PRINTF(("semop:  "
					    "can't do it now\n"));
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
					    semptr->semzcnt > 0)
						do_wakeup = 1;
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					SEM_PRINTF(("semop:  not zero now\n"));
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
		SEM_PRINTF(("semop:  rollback 0 through %d\n", i - 1));
		while (i-- > 0)
			semaptr->_sem_base[sops[i].sem_num].semval -=
			    sops[i].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			error = EAGAIN;
			goto out;
		}

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

		sem_waiters++;
		SEM_PRINTF(("semop:  good night!\n"));
		error = cv_wait_sig(&semcv[semid], &semlock);
		SEM_PRINTF(("semop:  good morning (error=%d)!\n", error));
		sem_waiters--;

		/* Notify reallocator, if it is waiting */
		cv_broadcast(&sem_realloc_cv);

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm._seq != seq) {
			error = EIDRM;
			goto out;
		}

		/*
		 * The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;

		/* If a reallocation is in progress, restart the call */
		if (sem_realloc_state) {
			mutex_exit(&semlock);
			if (sops != small_sops)
				kmem_free(sops, nsops * sizeof(*sops));
			goto restart;
		}

		/* Is it really morning, or was our sleep interrupted? */
		if (error != 0) {
			error = EINTR;
			goto out;
		}
		SEM_PRINTF(("semop:  good morning!\n"));
	}

done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			error = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (error == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and downs so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			while (i-- > 0) {
				if ((sops[i].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[i].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[i].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (i = 0; i < nsops; i++)
				semaptr->_sem_base[sops[i].sem_num].semval -=
				    sops[i].sem_op;

			SEM_PRINTF(("error = %d from semundo_adjust\n", error));
			goto out;
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Update sem_otime */
	semaptr->sem_otime = time_second;

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
		SEM_PRINTF(("semop:  doing wakeup\n"));
		cv_broadcast(&semcv[semid]);
		SEM_PRINTF(("semop:  back from wakeup\n"));
	}
	SEM_PRINTF(("semop:  done\n"));
	*retval = 0;

 out:
	mutex_exit(&semlock);
	if (sops != small_sops)
		kmem_free(sops, nsops * sizeof(*sops));
	return error;
}
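For reference, a minimal user-space sketch of how this system call is exercised (a hypothetical standalone program, not part of the kernel source above): the caller builds a vector of struct sembuf operations and hands it to semop(2), which applies the whole vector atomically or not at all, exactly as the retry loop above guarantees.

/*
 * Hypothetical usage sketch: one "V" (+1) and one "P" (-1) submitted as a
 * single vector.  SEM_UNDO asks the kernel to record the adjustments so
 * they are rolled back if the process exits without undoing them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int
main(void)
{
	struct sembuf ops[2];

	/*
	 * Private set with a single semaphore.  The values of a fresh set
	 * are 0 on the BSDs and Linux, though POSIX leaves them unspecified.
	 */
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid == -1) {
		perror("semget");
		return EXIT_FAILURE;
	}

	ops[0].sem_num = 0; ops[0].sem_op =  1; ops[0].sem_flg = SEM_UNDO;
	ops[1].sem_num = 0; ops[1].sem_op = -1; ops[1].sem_flg = SEM_UNDO;

	if (semop(semid, ops, 2) == -1)
		perror("semop");

	(void)semctl(semid, 0, IPC_RMID);	/* remove the set */
	return EXIT_SUCCESS;
}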
Example #2
int sysvipc_semop (int semid, struct sembuf *sops, unsigned nsops) {
	struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
	struct sembuf *sopptr;
	struct sem *semptr = NULL;
	struct sem *xsemptr = NULL;
	int eval = 0;
	int i, j;
	int do_undos;
	int val_to_sleep;

	sysv_print("[client %d] call to semop(%d, %u)\n",
			getpid(), semid, nsops);
//TODO
	/*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
	  return (ENOSYS);
	  */

	semaptr = get_semaptr(semid, 0, IPC_W);
	if (!semaptr) {
		errno = EINVAL;
		return (-1);
	}

#ifdef SYSV_SEMS
	if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
	if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
		sysv_print("sema removed\n");
		errno = EIDRM;
		goto done2;
	}

	if (nsops > MAX_SOPS) {
		sysv_print("too many sops (max=%d, nsops=%u)\n",
				getpid(), MAX_SOPS, nsops);
		eval = E2BIG;
		goto done;
	}

	/*
	* Loop trying to satisfy the vector of requests.
	* If we reach a point where we must wait, any requests already
	* performed are rolled back and we go to sleep until some other
	* process wakes us up.  At this point, we start all over again.
	*
	* This ensures that from the perspective of other tasks, a set
	* of requests is atomic (never partially satisfied).
	*/
	do_undos = 0;

	for (;;) {

		semptr = NULL;

		for (i = 0; i < (int)nsops; i++) {
			sopptr = &sops[i];

			if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
				eval = EFBIG;
				goto done;
			}

			semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&semptr->sem_mutex);
#endif
			sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
				sopptr->sem_num, semptr->semval, sopptr->sem_op,
				(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");

			if (sopptr->sem_op < 0) {
				if (semptr->semval + sopptr->sem_op < 0) {
					sysv_print("semop:  can't do it now\n");
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
						semptr->semzcnt > 0)
						umtx_wakeup((int *)&semptr->semval, 0);
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					sysv_print("semop:  not zero now\n");
					break;
				}
			} else {
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
				if (semptr->semncnt > 0)
					umtx_wakeup((int *)&semptr->semval, 0);
			}
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= (int)nsops)
			goto donex;

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		/*
		 * Rollback the semaphores we had acquired.
		 */
		sysv_print("semop:  rollback 0 through %d\n", i-1);
		for (j = 0; j < i; j++) {
			xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
			xsemptr->semval -= sops[j].sem_op;
			if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0);
			if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
		}

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			eval = EAGAIN;
			goto done;
		}

		/*
		 * Release semaptr->lock while sleeping, allowing other
		 * semops (like SETVAL, SETALL, etc), which require an
		 * exclusive lock and might wake us up.
		 *
		 * Reload and recheck the validity of semaptr on return.
		 * Note that semptr itself might have changed too, but
		 * we've already interlocked for semptr and that is what
		 * will be woken up if it wakes up the tsleep on a MP
		 * race.
		 *
		 */

		sysv_print("semop:  good night!\n");
		val_to_sleep = semptr->semval;
		rwlock_unlock(semid, semaptr);
		put_shmdata(semid);

		/* We don't sleep more than SYSV_TIMEOUT because we could
		 * go to sleep after another process calls wakeup and remain
		 * blocked.
		 */
		eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
		/* return code is checked below, after sem[nz]cnt-- */

		/*
		 * Make sure that the semaphore still exists
		 */

		/* Check that the semaphore was not removed by another thread. */
		auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
		if (!auxsemaptr) {
			errno = EIDRM;
			return (-1);
		}
			
		if (auxsemaptr != semaptr) {
			errno = EIDRM;
			goto done2;
		}

		/* Check that the semaphore was not removed by another process. */
#ifdef SYSV_SEMS
		if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
		if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
			errno = EIDRM;
			goto done2;
		}
		sysv_print("semop:  good morning (eval=%d)!\n", eval);

		/* The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif

		/*
		 * Is it really morning, or was our sleep interrupted?
		 * (Delayed check of umtx_sleep() return code because we
		 * need to decrement sem[nz]cnt either way.)
		 */
		if (eval) {
			eval = EINTR;
			goto done;
		}

		sysv_print("semop:  good morning!\n");
		/* RETRY LOOP */
	}

donex:
	/*
	* Process any SEM_UNDO requests.
	*/
	if (do_undos) {
		for (i = 0; i < (int)nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and downs so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			for (j = i - 1; j >= 0; j--) {
				if ((sops[j].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[j].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(semid, sops[j].sem_num,
							adjval) != 0)
					sysv_print("semop - can't undo undos");
			}

			for (j = 0; j < (int)nsops; j++) {
				xsemptr = &semaptr->ds.sem_base[
					sops[j].sem_num];
#ifdef SYSV_SEMS
				sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
				xsemptr->semval -= sops[j].sem_op;
				if (xsemptr->semval == 0 &&
						xsemptr->semzcnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0);
				if (xsemptr->semval <= 0 &&
						xsemptr->semncnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
				sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
			}

			sysv_print("eval = %d from semundo_adjust\n", eval);
			goto done;
		}
	}

	/* Set sempid field for each semaphore. */
	for (i = 0; i < (int)nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		semptr->sempid = getpid();
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
	}

	sysv_print("semop:  done\n");
	semaptr->ds.sem_otime = time(NULL);
done:
	rwlock_unlock(semid, semaptr);
done2:
	put_shmdata(semid);

	return (eval);
}
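To make the IPC_NOWAIT and sleep/wakeup paths above concrete, here is a hedged sketch (a hypothetical standalone program, not part of the sysvipc library source): a non-blocking "P" on an empty semaphore fails with EAGAIN after the rollback, while a blocking "P" parks the caller in the sleep shown above until another process performs a "V".

/*
 * Hypothetical demonstration of the EAGAIN (IPC_NOWAIT) and blocking paths.
 * Assumes the implementation initializes a fresh semaphore set to 0.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>

int
main(void)
{
	struct sembuf op;
	pid_t pid;

	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid == -1) {
		perror("semget");
		return EXIT_FAILURE;
	}

	/* Non-blocking P on a semaphore that is still 0: EAGAIN. */
	op.sem_num = 0; op.sem_op = -1; op.sem_flg = IPC_NOWAIT;
	if (semop(semid, &op, 1) == -1 && errno == EAGAIN)
		printf("P would block, as expected\n");

	pid = fork();
	if (pid == -1) {
		perror("fork");
		return EXIT_FAILURE;
	}
	if (pid == 0) {
		/* Child: V the semaphore after a short delay, waking the parent. */
		sleep(1);
		op.sem_num = 0; op.sem_op = 1; op.sem_flg = 0;
		if (semop(semid, &op, 1) == -1)
			perror("child semop(V)");
		_exit(0);
	}

	/* Parent: blocking P; sleeps until the child's V arrives. */
	op.sem_num = 0; op.sem_op = -1; op.sem_flg = 0;
	if (semop(semid, &op, 1) == -1)
		perror("parent semop(P)");
	else
		printf("woken up by the child's V\n");

	waitpid(pid, NULL, 0);
	(void)semctl(semid, 0, IPC_RMID);
	return EXIT_SUCCESS;
}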