Example #1
/*
 * This function is used to acquire a contested lock.
 */
int
__sysv_umtx_lock(volatile umtx_t *mtx, int timo)
{
	int v, errval, ret = 0;

	/* contested */
	do {
		v = *mtx;
		if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) {
			if (timo == 0)
				umtx_sleep(mtx, 2, timo);
		else if ((errval = umtx_sleep(mtx, 2, timo)) > 0) {
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, 2))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
	} while (!atomic_cmpset_acq_int(mtx, 0, 2));

	return (ret);
}
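
Example #1 shows only the contested acquisition path. For completeness, the sketch below illustrates what the matching release path has to do under the same 0 (free) / 1 (locked) / 2 (contested) encoding: reset the word and, if it was contested, wake one sleeper with umtx_wakeup(2). It is an illustration that reuses umtx_t and atomic_cmpset_acq_int() from the listing above, not the actual libc implementation.

/*
 * Hedged sketch, not the libc code: release a 0/1/2-encoded umtx.
 * If the word was in the contested state (2), at least one thread may
 * be sleeping in umtx_sleep(), so wake one of them; it will then retry
 * atomic_cmpset_acq_int(mtx, 0, 2) in __sysv_umtx_lock().
 */
static void
sysv_umtx_unlock_sketch(volatile umtx_t *mtx)
{
	int v;

	for (;;) {
		v = *mtx;			/* expected: 1 or 2 */
		if (atomic_cmpset_acq_int(mtx, v, 0)) {
			if (v == 2)		/* there were waiters */
				umtx_wakeup(mtx, 1);
			break;
		}
		/* lost a race with a waiter upgrading 1 -> 2; retry */
	}
}
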
Example #2
void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);
	cpu_enable_intr();
	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt &&
		    (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			splz();
#ifdef SMP
			KKASSERT(MP_LOCK_HELD() == 0);
#endif
			if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				reqflags = gd->mi.gd_reqflags &
					   ~RQF_IDLECHECK_WK_MASK;
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
					   1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000 
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_reqflags,
						gd->gd_fpending);
				}
#endif
			}
			++cpu_idle_hltcnt;
		} else {
			splz();
#ifdef SMP
			__asm __volatile("pause");
#endif
			++cpu_idle_spincnt;
		}
	}
}
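
Example #2 is only one half of a handshake. The idle loop re-checks gd_reqflags and then parks in umtx_sleep() on that word with a bounded (one second) timeout, so any code that posts work for this cpu must set its request bit and then call umtx_wakeup() on the same word, otherwise the virtual cpu may idle for up to the full timeout. The helper below is a hedged sketch of that wakeup side; the function name is illustrative, and it assumes only atomic_set_int() from <machine/atomic.h> and the documented umtx_wakeup(2) behaviour, not the actual vkernel code.

/*
 * Illustrative sketch, not the vkernel source: publish a request bit in
 * the reqflags word that cpu_idle() sleeps on, then wake every sleeper
 * so the loop re-evaluates RQF_IDLECHECK_WK_MASK.
 */
static void
wakeup_idle_cpu_sketch(volatile u_int *reqflags, u_int flag)
{
	atomic_set_int(reqflags, flag);			/* set the work bit */
	umtx_wakeup((volatile const int *)reqflags, 0);	/* 0 == wake all */
}
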
Example #3
static int
userland_get_mutex_contested(struct umtx *mtx, int timo)
{
    int v;

    for (;;) {
	v = mtx->lock;
	assert(v & ~MTX_LOCKED);	/* our contesting count still there */
	if ((v & MTX_LOCKED) == 0) {
	    /*
	     * not locked, attempt to remove our contested count and
	     * lock at the same time.
	     */
	    if (cmp_and_exg(&mtx->lock, v, (v - 1) | MTX_LOCKED) == 0) {
		cleanup_mtx_contested = NULL;
		cleanup_mtx_held = mtx;
		return(0);
	    }
	} else {
	    /*
	     * Still locked, sleep and try again.
	     */
	    if (verbose_opt)
		fprintf(stderr, "waiting on mutex timeout=%d\n", timo);
	    if (timo == 0) {
		umtx_sleep(&mtx->lock, v, 0);
	    } else {
		if (umtx_sleep(&mtx->lock, v, 1000000) < 0) {
		    if (errno == EAGAIN && --timo == 0) {
			cleanup_mtx_contested = NULL;
			userland_rel_mutex_contested(mtx);
			return(-1);
		    }
		}
	    }
	}
    }
}
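
Example #3 keeps the waiter ("contesting") count and the MTX_LOCKED bit in the same lock word, which is what lets the release side decide whether a umtx_wakeup() is needed at all. The sketch below shows a minimal release path under the same conventions (cmp_and_exg() returning 0 on success, MTX_LOCKED as above); it is illustrative, not the original benchmark code.

/*
 * Sketch of the matching release path (illustrative): clear MTX_LOCKED
 * and, if a contesting count remains in the word, wake one sleeper so
 * it can retry its cmp_and_exg() in userland_get_mutex_contested().
 */
static void
userland_rel_mutex_sketch(struct umtx *mtx)
{
    int v;

    for (;;) {
	v = mtx->lock;
	assert(v & MTX_LOCKED);		/* we must hold the lock */
	if (cmp_and_exg(&mtx->lock, v, v & ~MTX_LOCKED) == 0) {
	    if (v & ~MTX_LOCKED)	/* contesting count is non-zero */
		umtx_wakeup(&mtx->lock, 1);
	    return;
	}
	/* a waiter changed the contesting count under us; retry */
    }
}
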
Example #4
int sysvipc_semop (int semid, struct sembuf *sops, unsigned nsops) {
	struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
	struct sembuf *sopptr;
	struct sem *semptr = NULL;
	struct sem *xsemptr = NULL;
	int eval = 0;
	int i, j;
	int do_undos;
	int val_to_sleep;

	sysv_print("[client %d] call to semop(%d, %u)\n",
			getpid(), semid, nsops);
//TODO
	/*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
	  return (ENOSYS);
	  */

	semaptr = get_semaptr(semid, 0, IPC_W);
	if (!semaptr) {
		errno = EINVAL;
		return (-1);
	}

#ifdef SYSV_SEMS
	if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
	if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
		sysv_print("sema removed\n");
		errno = EIDRM;
		goto done2;
	}

	if (nsops > MAX_SOPS) {
		sysv_print("too many sops (max=%d, nsops=%u)\n",
				getpid(), MAX_SOPS, nsops);
		eval = E2BIG;
		goto done;
	}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {

		semptr = NULL;

		for (i = 0; i < (int)nsops; i++) {
			sopptr = &sops[i];

			if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
				eval = EFBIG;
				goto done;
			}

			semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&semptr->sem_mutex);
#endif
			sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
				sopptr->sem_num, semptr->semval, sopptr->sem_op,
				(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");

			if (sopptr->sem_op < 0) {
				if (semptr->semval + sopptr->sem_op < 0) {
					sysv_print("semop:  can't do it now\n");
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
						semptr->semzcnt > 0)
						umtx_wakeup((int *)&semptr->semval, 0);
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					sysv_print("semop:  not zero now\n");
					break;
				}
			} else {
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
				if (semptr->semncnt > 0)
					umtx_wakeup((int *)&semptr->semval, 0);
			}
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= (int)nsops)
			goto donex;

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		/*
		 * Rollback the semaphores we had acquired.
		 */
		sysv_print("semop:  rollback 0 through %d\n", i-1);
		for (j = 0; j < i; j++) {
			xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
			xsemptr->semval -= sops[j].sem_op;
			if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0);
			if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
		}

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			eval = EAGAIN;
			goto done;
		}

		/*
		 * Release semaptr->lock while sleeping, allowing other
		 * semops (like SETVAL, SETALL, etc), which require an
		 * exclusive lock and might wake us up.
		 *
		 * Reload and recheck the validity of semaptr on return.
		 * Note that semptr itself might have changed too, but
		 * we've already interlocked for semptr and that is what
		 * will be woken up if it wakes up the tsleep on a MP
		 * race.
		 *
		 */

		sysv_print("semop:  good night!\n");
		val_to_sleep = semptr->semval;
		rwlock_unlock(semid, semaptr);
		put_shmdata(semid);

		/*
		 * We never sleep longer than SYSV_TIMEOUT: the wakeup from
		 * another process could arrive just before we block, and
		 * without a bounded timeout we would then stay asleep forever.
		 */
		eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
		/* return code is checked below, after sem[nz]cnt-- */

		/*
		 * Make sure the semaphore still exists: another process may
		 * have removed it while we were asleep.
		 */
		auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
		if (!auxsemaptr) {
			errno = EIDRM;
			return (-1);
		}
			
		if (auxsemaptr != semaptr) {
			errno = EIDRM;
			goto done;
		}

		/* Fail if the semaphore was removed while we were asleep. */
#ifdef SYSV_SEMS
		if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
		if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
			errno = EIDRM;
			goto done;
		}
		sysv_print("semop:  good morning (eval=%d)!\n", eval);

		/*
		 * The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif

		/*
		 * Is it really morning, or was our sleep interrupted?
		 * (Delayed check of tsleep() return code because we
		 * need to decrement sem[nz]cnt either way.)
		 */
		if (eval) {
			eval = EINTR;
			goto done;
		}

		sysv_print("semop:  good morning!\n");
		/* RETRY LOOP */
	}

donex:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < (int)nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			for (j = i - 1; j >= 0; j--) {
				if ((sops[j].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[j].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(semid, sops[j].sem_num,
							adjval) != 0)
					sysv_print("semop - can't undo undos");
			}

			for (j = 0; j < (int)nsops; j++) {
				xsemptr = &semaptr->ds.sem_base[
					sops[j].sem_num];
#ifdef SYSV_SEMS
				sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
				xsemptr->semval -= sops[j].sem_op;
				if (xsemptr->semval == 0 &&
						xsemptr->semzcnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0);
				if (xsemptr->semval <= 0 &&
						xsemptr->semncnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
				sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
			}

			sysv_print("eval = %d from semundo_adjust\n", eval);
			goto done;
		}
	}

	/* Set sempid field for each semaphore. */
	for (i = 0; i < (int)nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		semptr->sempid = getpid();
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
	}

	sysv_print("semop:  done\n");
	semaptr->ds.sem_otime = time(NULL);
done:
	rwlock_unlock(semid, semaptr);
done2:
	put_shmdata(semid);

	return (eval);
}
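
All four examples rely on the same handshake: the waiter reads a word, passes the observed value to umtx_sleep() so it never blocks on stale state, and re-checks in a loop; the updater changes the word and then calls umtx_wakeup(). The stand-alone program below is a hedged demonstration of just that pattern on DragonFly BSD; it is not part of any of the sources above and assumes only the documented umtx_sleep(2)/umtx_wakeup(2) semantics (sleep only while *ptr still equals the given value, timeout in microseconds, count 0 wakes all sleepers).

/*
 * Stand-alone demonstration of the wait/wakeup handshake used above.
 * Build on DragonFly BSD:  cc -pthread umtx_demo.c -o umtx_demo
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>		/* umtx_sleep(), umtx_wakeup() */

static volatile int counter;	/* plays the role of semptr->semval */

static void *
waiter(void *arg)
{
	int v;

	for (;;) {
		v = counter;
		if (v > 0)		/* the "semaphore" became available */
			break;
		/*
		 * Sleep only while counter still equals the value we saw,
		 * and bound the sleep (in microseconds) so a lost wakeup
		 * cannot block us forever, as SYSV_TIMEOUT does above.
		 */
		umtx_sleep(&counter, v, 1000000);
	}
	printf("waiter: saw counter=%d\n", v);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, waiter, NULL);
	sleep(1);			/* give the waiter time to block */
	counter = 1;			/* "V" the semaphore ... */
	umtx_wakeup(&counter, 0);	/* ... and wake every sleeper */
	pthread_join(td, NULL);
	return (0);
}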