Example #1
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = tsleep(mtx, flags, ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
				tsleep_remove(curthread);
			}
		}
		++mtx_collision_count;
	}
	return (error);
}
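Since an error can only come back when PCATCH is supplied, callers that sleep interruptibly must check the return value. A minimal caller sketch, assuming mtx_unlock_sh() is the matching release routine for this flavour of the API (the wrapper name here is hypothetical):

/*
 * Hypothetical caller: acquire the shared lock, allowing a signal to
 * interrupt the sleep.  mtx_unlock_sh() is assumed to be the matching
 * release routine.
 */
static int
example_shared_access(mtx_t mtx)
{
	int error;

	error = __mtx_lock_sh(mtx, "exshlk", PCATCH, 0);
	if (error)
		return (error);		/* interrupted, lock not held */
	/* ... read the data protected by mtx ... */
	mtx_unlock_sh(mtx);
	return (0);
}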
Example #2
/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
Example #3
/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}
Example #4
/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks_wr/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;
	int	res = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			--gd->gd_spinlocks_wr;
			cpu_ccfence();
			--gd->gd_curthread->td_critcount;
			res = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return res;
}
Example #5
void
hammer_unlock(struct hammer_lock *lock)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	lv = lock->lockval;
	KKASSERT(lv != 0);
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->lowner == td);

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (nlv > 1) {
			nlv = lv - 1;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (nlv == 1) {
			nlv = 0;
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
				lock->lowner = NULL;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(&lock->lockval);
				break;
			}
		} else {
			panic("hammer_unlock: lock %p is not held", lock);
		}
	}
}
Example #6
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
Example #7
/*
 * get - lock and return the f_offset field.
 * set - set and unlock the f_offset field.
 *
 * These routines serve the dual purpose of serializing access to the
 * f_offset field (at least on i386) and guaranteeing operational integrity
 * when multiple read()ers and write()ers are present on the same fp.
 *
 * MPSAFE
 */
static __inline off_t
vn_get_fpf_offset(struct file *fp)
{
	u_int	flags;
	u_int	nflags;

	/*
	 * Shortcut critical path.
	 */
	flags = fp->f_flag & ~FOFFSETLOCK;
	if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
		return(fp->f_offset);

	/*
	 * The hard way
	 */
	for (;;) {
		flags = fp->f_flag;
		if (flags & FOFFSETLOCK) {
			nflags = flags | FOFFSETWAKE;
			tsleep_interlock(&fp->f_flag, 0);
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
		} else {
			nflags = flags | FOFFSETLOCK;
			if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
				break;
		}
	}
	return(fp->f_offset);
}
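Only the "get" half is shown above; the "set" half described in the header comment would publish the new offset, clear FOFFSETLOCK, and wake any sleeper that latched FOFFSETWAKE. A sketch along those lines (the name and exact structure are assumptions, not the verbatim kernel routine):

/*
 * Sketch of the release counterpart: store the new offset, then clear
 * FOFFSETLOCK/FOFFSETWAKE and wake waiters if needed.
 */
static __inline void
vn_set_fpf_offset(struct file *fp, off_t offset)
{
	u_int	flags;
	u_int	nflags;

	fp->f_offset = offset;
	for (;;) {
		flags = fp->f_flag;
		nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
		if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
			if (flags & FOFFSETWAKE)
				wakeup(&fp->f_flag);
			break;
		}
	}
}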
Example #8
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
Example #9
/*
 * hammer_rel_interlock() works a bit differently in that it must
 * acquire the lock in tandem with a 1->0 transition.  CHECK is
 * not used.
 *
 * TRUE is returned on 1->0 transitions with the lock held on return
 * and FALSE is returned otherwise with the lock not held.
 *
 * It is important to note that the refs are not stable and may
 * increase while we hold the lock, the TRUE indication only means
 * that we transitioned 1->0, not necessarily that we stayed at 0.
 *
 * Another thread bumping refs while we hold the lock will set CHECK,
 * causing one of the competing hammer_ref_interlock() calls to
 * return TRUE after we release our lock.
 *
 * MPSAFE
 */
int
hammer_rel_interlock(struct hammer_lock *lock, int locked)
{
	u_int lv;
	u_int nlv;

	/*
	 * In locked mode (failure/unload path) we release the
	 * ref-count but leave it locked.
	 */
	if (locked) {
		hammer_rel(lock);
		return(1);
	}

	/*
	 * Integrated reference count drop with LOCKED, plus the hot-path
	 * returns.
	 */
	for (;;) {
		lv = lock->refs;

		if (lv == 1) {
			nlv = 0 | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			if ((lv & HAMMER_REFS_LOCKED) == 0) {
				nlv = (lv - 1) | HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			} else {
				nlv = lv | HAMMER_REFS_WANTED;
				tsleep_interlock(&lock->refs, 0);
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					tsleep(&lock->refs, PINTERLOCKED,
					       "h0lk", 0);
				}
			}
		} else {
			nlv = (lv - 1);
			KKASSERT((int)nlv >= 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return(0);
		}
	}
	/* not reached */
}
Example #10
int
ieee80211_node_dectestref(struct ieee80211_node *ni)
{
	/* XXX need equivalent of atomic_dec_and_test */
	atomic_subtract_int(&ni->ni_refcnt, 1);
	return atomic_cmpset_int(&ni->ni_refcnt, 0, 1);
}
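The XXX comment points at the missing primitive. Where atomic_fetchadd_int(9) is available, the decrement and the zero test can be folded into one atomic operation; a sketch under that assumption (the function name is hypothetical):

/*
 * Hypothetical variant: atomic_fetchadd_int() returns the value prior
 * to the addition, so a previous value of 1 means this call performed
 * the 1->0 transition.
 */
static __inline int
node_dectestref_fetchadd(struct ieee80211_node *ni)
{
	return (atomic_fetchadd_int(&ni->ni_refcnt, -1) == 1);
}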
Example #11
/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | shcount));
	KKASSERT(lock->lowner == td);

	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
	lock->lowner = NULL;

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(&lock->lockval);
			break;
		}
	}
}
Example #12
/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	for (;;) {
		lv = lock->lockval;

		if ((lv & ~HAMMER_LOCKF_WANTED) == shcount) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->lowner != curthread)
				panic("hammer_lock_upgrade: illegal state");
			error = 0;
			break;
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			panic("hammer_lock_upgrade: lock is not held");
			/* NOT REACHED */
			error = EDEADLK;
			break;
		} else {
			error = EDEADLK;
			break;
		}
	}
	return (error);
}
Example #13
/*ARGSUSED*/
int
drm_getmagic(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	static drm_magic_t sequence = 0;
	drm_auth_t auth;

	/* Find unique magic */
	if (fpriv->magic) {
		auth.magic = fpriv->magic;
	} else {
		do {
			int old = sequence;
			auth.magic = old+1;
			if (!atomic_cmpset_int(&sequence, old, auth.magic))
				continue;
		} while (drm_find_file(dev, auth.magic));
		fpriv->magic = auth.magic;
		(void) drm_add_magic(dev, fpriv, auth.magic);
	}


	DRM_DEBUG("drm_getmagic: %u", auth.magic);

	DRM_COPYTO_WITH_RETURN((void *)data, &auth, sizeof (auth));

	return (0);
}
Example #14
void
_mtx_spinlock_sh(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
		}
		cpu_pause();
	}
}
Example #15
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.   A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
Example #16
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}
Example #17
/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	struct pcpu *pcpu;
	u_int cpuid, new_pending, old_pending;

	CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);

	while ((cpuid = ffs(cpus)) != 0) {
		cpuid--;
		cpus &= ~(1 << cpuid);
		pcpu = pcpu_find(cpuid);

		if (pcpu) {
			do {
				old_pending = pcpu->pc_pending_ipis;
				new_pending = old_pending | ipi;
			} while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
			    old_pending, new_pending));	

			if (old_pending)
				continue;

			mips_ipi_send (cpuid);
		}
	}
}
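For context, the receiving cpu would normally drain the pending mask in a single atomic step before dispatching; a sketch of that side, assuming atomic_readandclear_int() and leaving the per-IPI dispatch abstract:

/*
 * Hypothetical receive-side sketch: fetch and clear all pending IPI
 * bits at once, then handle each set bit.
 */
static void
example_ipi_intr(void)
{
	struct pcpu *pcpu = pcpu_find(PCPU_GET(cpuid));
	u_int pending, ipi;

	pending = atomic_readandclear_int(&pcpu->pc_pending_ipis);
	while (pending != 0) {
		ipi = pending & -pending;	/* isolate lowest set bit */
		pending &= ~ipi;
		/* ... dispatch the handler registered for 'ipi' ... */
	}
}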
Example #18
/**
 * Called by the client, this returns a unique magic number to be authorized
 * by the master.
 *
 * The master may use its own knowledge of the client (such as the X
 * connection that the magic is passed over) to determine if the magic number
 * should be authenticated.
 */
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	static drm_magic_t sequence = 0;
	struct drm_auth *auth = data;

	/* Find unique magic */
	if (file_priv->magic) {
		auth->magic = file_priv->magic;
	} else {
		DRM_LOCK();
		do {
			int old = sequence;

			auth->magic = old+1;

			if (!atomic_cmpset_int(&sequence, old, auth->magic))
				continue;
		} while (drm_find_file(dev, auth->magic));
		file_priv->magic = auth->magic;
		drm_add_magic(dev, file_priv, auth->magic);
		DRM_UNLOCK();
	}

	DRM_DEBUG("%u\n", auth->magic);

	return 0;
}
Example #19
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN.
	 *
	 * Do not use cmpxchg to wait for LINKSPIN to clear as this might
	 * result in too much cpu cache traffic.
	 */
	crit_enter_raw(td);
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * Delete the link and release LINKSPIN.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_LINKED_EX:
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock |= MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	case MTX_LINK_LINKED_SH:
		if (link->next == link) {
			mtx->mtx_shlink = NULL;
			nlock |= MTX_SHWANTED;	/* to clear */
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	default:
		/* no change */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
	crit_exit_raw(td);
}
Example #20
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		cpu_ccfence();

		/*
		 * NOP if already shared.
		 */
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}

		/*
		 * Transfer count to shared.  Any additional pending shared
		 * waiters must be woken up.
		 */
		if (lock & MTX_SHWANTED) {
			if (mtx_chain_link_sh(mtx, lock))
				break;
			/* retry */
		} else {
			nlock = lock & ~MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
			/* retry */
		}
		cpu_pause();
	}
}
Example #21
/*
 * Release a ref on an active or inactive vnode.
 *
 * Caller has no other requirements.
 *
 * If VREF_FINALIZE is set this will deactivate the vnode on the 1->0
 * transition, otherwise we leave the vnode in the active list and
 * do a lockless transition to 0, which is very important for the
 * critical path.
 *
 * (vrele() is not called when a vnode is being destroyed w/kfree)
 */
void
vrele(struct vnode *vp)
{
	for (;;) {
		int count = vp->v_refcnt;
		cpu_ccfence();
		KKASSERT((count & VREF_MASK) > 0);
		KKASSERT(vp->v_state == VS_ACTIVE ||
			 vp->v_state == VS_INACTIVE);

		/*
		 * 2+ case
		 */
		if ((count & VREF_MASK) > 1) {
			if (atomic_cmpset_int(&vp->v_refcnt, count, count - 1))
				break;
			continue;
		}

		/*
		 * 1->0 transition case must handle possible finalization.
		 * When finalizing we transition 1->0x40000000.  Note that
		 * cachedvnodes is only adjusted on transitions to ->0.
		 *
		 * WARNING! VREF_TERMINATE can be cleared at any point
		 *	    when the refcnt is non-zero (by vget()) and
		 *	    the vnode has not been reclaimed.  Thus
		 *	    transitions out of VREF_TERMINATE do not have
		 *	    to mess with cachedvnodes.
		 */
		if (count & VREF_FINALIZE) {
			vx_lock(vp);
			if (atomic_cmpset_int(&vp->v_refcnt,
					      count, VREF_TERMINATE)) {
				vnode_terminate(vp);
				break;
			}
			vx_unlock(vp);
		} else {
			if (atomic_cmpset_int(&vp->v_refcnt, count, 0)) {
				atomic_add_int(&mycpu->gd_cachedvnodes, 1);
				break;
			}
		}
		/* retry */
	}
}
Example #22
/*
 * Start or restart a timeout.  Installs the callout structure on the
 * callwheel.  Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed, a
 * minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

#ifdef INVARIANTS
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	/*
	 * Our cpu must gain ownership of the callout and cancel anything
	 * still running, which is complex.  The easiest way to do it is to
	 * issue a callout_stop().
	 *
	 * Clearing bits on flags is a way to guarantee they are not set,
	 * as the cmpset atomic op will fail otherwise.  PENDING and ARMED
 * must not be set; if we find them set we loop up and call
 * callout_stop_sync() again.
 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_PENDING |
			 CALLOUT_ACTIVE;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
	}


	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;

	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
Example #23
/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}
Example #24
/*
 * Drop the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be cleared on a 1->0
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 *
 * MPSAFE
 */
void
hammer_rel(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			nlv = (lv - 1) & ~HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			KKASSERT((int)lv > 0);
			nlv = (lv - 1);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
	/* not reached */
}
Example #25
static ACPI_STATUS
acpi_task_enqueue(int priority, ACPI_OSD_EXEC_CALLBACK Function, void *Context)
{
    struct acpi_task_ctx *at;
    int i;

    for (at = NULL, i = 0; i < acpi_max_tasks; i++)
	if (atomic_cmpset_int(&acpi_tasks[i].at_flag, ACPI_TASK_FREE,
	    ACPI_TASK_USED)) {
	    at = &acpi_tasks[i];
	    acpi_task_count++;
	    break;
	}

    if (i > acpi_tasks_hiwater)
	atomic_cmpset_int(&acpi_tasks_hiwater, acpi_tasks_hiwater, i);

    if (at == NULL) {
	printf("AcpiOsExecute: failed to enqueue task, consider increasing "
	    "the debug.acpi.max_tasks tunable\n");
	return (AE_NO_MEMORY);
    }

    TASK_INIT(&at->at_task, priority, acpi_task_execute, at);
    at->at_function = Function;
    at->at_context = Context;

    /*
     * If the task queue is ready, enqueue it now.
     */
    if (acpi_taskq_started) {
	atomic_set_int(&at->at_flag, ACPI_TASK_ENQUEUED);
	taskqueue_enqueue(acpi_taskq, &at->at_task);
	return (AE_OK);
    }
    if (bootverbose)
	printf("AcpiOsExecute: task queue not started\n");

    return (AE_OK);
}
Example #26
/*
 * Acquire the interlock on lock->refs.
 *
 * Return TRUE if CHECK is currently set.  Note that CHECK will not
 * be set if the reference count is 0, but can get set if this function
 * is preceded by, say, hammer_ref(), or through races with other
 * threads.  The return value allows the caller to use the same logic
 * as hammer_ref_interlock().
 *
 * MPSAFE
 */
int
hammer_get_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv & HAMMER_REFS_LOCKED) {
			nlv = lv | HAMMER_REFS_WANTED;
			tsleep_interlock(&lock->refs, 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "hilk", 0);
		} else {
			nlv = (lv | HAMMER_REFS_LOCKED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return((lv & HAMMER_REFS_CHECK) ? 1 : 0);
			}
		}
	}
}
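The comment above implies a matching release step that either sets or clears CHECK depending on whether the caller's work succeeded. The following is only an illustration of that flag handling, not the verbatim HAMMER release routine:

/*
 * Illustrative release sketch: drop LOCKED, set or clear CHECK based
 * on the caller's result, and wake any thread that set WANTED.
 */
static void
example_put_interlock(struct hammer_lock *lock, int error)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		KKASSERT(lv & HAMMER_REFS_LOCKED);
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if (error)
			nlv |= HAMMER_REFS_CHECK;
		else
			nlv &= ~HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}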
Example #27
void
randomdev_unblock(void)
{
	if (!random_context.seeded) {
		selwakeuppri(&random_context.rsel, PUSER);
		wakeup(&random_context);
                printf("random: unblocking device.\n");
		random_context.seeded = 1;
	}
	/* Do arc4random(9) a favour while we are about it. */
	(void)atomic_cmpset_int(&arc4rand_iniseed_state, ARC4_ENTR_NONE,
	    ARC4_ENTR_HAVE);
}
Example #28
/*
 * This helper function implements the release-with-wakeup API.  It is
 * executed for the non-trivial case or if the atomic op races.
 *
 * On the i->0 transition, if REFCNTF_WAITING is set it will be cleared
 * and a wakeup() will be issued.
 *
 * On any other transition we simply subtract (i) and leave the
 * REFCNTF_WAITING flag intact.
 *
 * This function returns TRUE(1) on the last release, whether a wakeup
 * occurred or not, and FALSE(0) otherwise.
 *
 * NOTE!  (i) cannot be 0
 */
int
_refcount_release_wakeup_n(volatile u_int *countp, u_int i)
{
	u_int n;

	for (;;) {
		n = *countp;
		cpu_ccfence();
		if (n == (REFCNTF_WAITING | i)) {
			if (atomic_cmpset_int(countp, n, 0)) {
				wakeup(countp);
				n = i;
				break;
			}
		} else {
			KKASSERT(n != REFCNTF_WAITING); /* illegal state */
			if (atomic_cmpset_int(countp, n, n - i))
				break;
		}
	}
	return (n == i);
}
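This helper is the slow path; the public release API would typically attempt the uncontested 1->0 case with a single cmpset inline and fall back to the helper only when that fails. A sketch of such a wrapper (the inline form is assumed):

/*
 * Sketch of the inline fast path: the common uncontested 1->0 release
 * is a single cmpset; everything else funnels into the helper above.
 */
static __inline int
refcount_release_wakeup(volatile u_int *countp)
{
	if (atomic_cmpset_int(countp, 1, 0))
		return (1);
	return (_refcount_release_wakeup_n(countp, 1));
}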
Example #29
/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;	/* upgrade if necc */
	crit_enter_raw(td);
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		link = mtx->mtx_exlink;
		KKASSERT(link != NULL);
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock = MTX_LINKSPIN | MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			nlock = MTX_LINKSPIN;			/* to clear */
		}
		KKASSERT(link->state == MTX_LINK_LINKED_EX);
		mtx->mtx_owner = link->owner;
		cpu_sfence();

		/*
		 * WARNING! The callback can only be safely
		 *	    made with LINKSPIN still held
		 *	    and in a critical section.
		 *
		 * WARNING! The link can go away after the
		 *	    state is set, or after the
		 *	    callback.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, 0);
		} else {
			link->state = MTX_LINK_ACQUIRED;
			wakeup(link);
		}
		atomic_clear_int(&mtx->mtx_lock, nlock);
		crit_exit_raw(td);
		return 1;
	}
	/* retry */
	crit_exit_raw(td);

	return 0;
}
Example #30
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				kprintf("hammer_lock_ex: held by %p\n",
					lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					kprintf("hammer_lock_ex: try again\n");
			}
		}
	}
}