Example #1
static void
def_lock_release(void *lock)
{
	Lock *l = (Lock *)lock;

	if ((l->lock & WAFLAG) == 0) {
		/* Read lock: drop this reader's count increment. */
		atomic_add_rel_int(&l->lock, -RC_INCR);
	} else {
		/* Write lock: clear the writer flag, then restore signals. */
		atomic_add_rel_int(&l->lock, -WAFLAG);
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}
Example #2
static void
def_lock_release(void *lock)
{
	Lock *l;

	l = (Lock *)lock;
	if ((l->lock & WAFLAG) == 0)
		atomic_add_rel_int(&l->lock, -RC_INCR);
	else {
		assert(wnested > 0);
		atomic_add_rel_int(&l->lock, -WAFLAG);
		if (atomic_fetchadd_int(&wnested, -1) == 1)
			sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}
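
Examples 1 and 2 show two generations of the same rtld default lock release: the second adds a wnested count so that the saved signal mask is only restored when the outermost write lock is dropped. For context, here is a simplified sketch of what the matching acquire paths look like. The names (WAFLAG, RC_INCR, fullsigmask, oldsigmask, wnested) follow the release code above, but the bodies and type definitions here are illustrative, not the verbatim rtld sources.

#include <sys/types.h>
#include <signal.h>
#include <machine/atomic.h>

#define	WAFLAG	0x1		/* A writer holds the lock. */
#define	RC_INCR	0x2		/* Per-reader increment of the lock word. */

typedef struct {
	volatile u_int lock;
} Lock;

static sigset_t fullsigmask, oldsigmask;
static volatile u_int wnested;

/* Reader: announce ourselves, then wait until no writer is active. */
static int
def_rlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;

	atomic_add_acq_int(&l->lock, RC_INCR);
	while (l->lock & WAFLAG)
		;		/* Spin until the writer clears WAFLAG. */
	return (0);
}

/* Writer: block signals, then take the lock once it is completely idle. */
static int
def_wlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;
	sigset_t tmp_oldsigmask;

	for (;;) {
		sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			break;
		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
	}
	/* Only the outermost writer records the mask to restore on release. */
	if (atomic_fetchadd_int(&wnested, 1) == 0)
		oldsigmask = tmp_oldsigmask;
	return (0);
}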
Example #3
void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{

	drm_gem_object_reference(obj);
	atomic_add_rel_int(&obj->handle_count, 1);
}
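
Example 3 only bumps obj->handle_count after taking an object reference; the interesting part is the release semantics on the increment. Below is a minimal, generic sketch (deliberately not the DRM code, all names hypothetical) of the hold/drop pattern such a counter usually participates in, using the same FreeBSD atomic(9) primitives: the release add publishes prior initialization, and the thread that drops the last reference issues an acquire fence (available in newer FreeBSD trees) before tearing the object down.

#include <sys/types.h>
#include <machine/atomic.h>

struct refobj {
	volatile u_int	refcnt;
	/* ... payload initialized before the first refobj_hold() ... */
};

static void
refobj_hold(struct refobj *o)
{

	/* Release add, mirroring the handle_count bump in Example 3. */
	atomic_add_rel_int(&o->refcnt, 1);
}

static void
refobj_drop(struct refobj *o, void (*destroy)(struct refobj *))
{

	if (atomic_fetchadd_int(&o->refcnt, -1) == 1) {
		/*
		 * Last reference: the acquire fence orders the teardown
		 * after every access made by previous reference holders.
		 */
		atomic_thread_fence_acq();
		destroy(o);
	}
}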
Example #4
/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_submitted++;
}
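
In ioat_submit_single() the descriptor is filled in by the caller before the head indices are advanced; the release semantics on the two increments are what publish those writes to whoever consumes the ring. The following generic sketch (a hypothetical single-producer/single-consumer ring, not the ioat(4) data structures) shows the pairing acquire load on the consumer side.

#include <sys/types.h>
#include <machine/atomic.h>

#define	RING_SIZE	64		/* Hypothetical ring size. */

struct ring {
	volatile u_int	head;		/* Producer index. */
	u_int		tail;		/* Consumer index. */
	void		*slot[RING_SIZE];
};

/* Producer: fill the slot first, then publish it with a release add. */
static void
ring_submit(struct ring *r, void *desc)
{

	r->slot[r->head % RING_SIZE] = desc;
	atomic_add_rel_int(&r->head, 1);
}

/*
 * Consumer: the acquire load pairs with the release add above, so the
 * slot written before the increment is guaranteed to be visible here.
 */
static void *
ring_consume(struct ring *r)
{

	if (r->tail == atomic_load_acq_int(&r->head))
		return (NULL);		/* Nothing pending. */
	return (r->slot[r->tail++ % RING_SIZE]);
}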
Example #5
/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know and flag the buffer as assigned if it hasn't already
 * been marked assigned due to filling while it was in the store position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}
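
The generation counter bumped here lives in a header page shared with the user process (struct bpf_zbuf_header from net/bpf.h). Below is a rough userspace sketch of checking and acknowledging a held buffer; the field names come from net/bpf.h, but the exact comparison and acknowledgement convention shown is illustrative, see bpf(4) for the authoritative description.

#include <sys/types.h>
#include <machine/atomic.h>
#include <net/bpf.h>

/*
 * Does the held buffer currently belong to the user process?  The acquire
 * load pairs with the kernel's atomic_add_rel_int() on bzh_kernel_gen, so
 * bzh_kernel_len and the packet data stored before that increment are
 * visible once the new generation number is observed.
 */
static int
zbuf_owned_by_user(struct bpf_zbuf_header *bzh)
{

	return (atomic_load_acq_int(&bzh->bzh_kernel_gen) !=
	    bzh->bzh_user_gen);
}

/* Hand the buffer back by catching the user generation count up. */
static void
zbuf_ack(struct bpf_zbuf_header *bzh)
{

	atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
}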
Example #6
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function 
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif
	
	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * memory pointed by smp_rv_func_arg.
	 *
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	 */
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}
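
For completeness, the release add on smp_rv_waiters[3] at the end only makes sense together with the acquire load it is documented to pair with. A simplified sketch of that wait on the initiating CPU follows; the real loop lives in smp_rendezvous_cpus() and uses the same file-scope smp_rv_* variables, so this rendering is illustrative rather than verbatim.

static void
wait_for_rendezvous_exit(void)	/* Hypothetical helper name. */
{

	/*
	 * The acquire load pairs with atomic_add_rel_int() in
	 * smp_rendezvous_action(): once every target CPU has been counted,
	 * all of their stores from the action routine are visible here and
	 * the smp_rv_* parameters may safely be reused or freed.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < smp_rv_ncpus)
		cpu_spinwait();
}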