Example #1
int ib_dealloc_pd(struct ib_pd *pd)
{
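	/* The PD is still in use (nonzero use count); refuse to free it. */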
	if (atomic_load_acq_int(&pd->usecnt))
		return (EBUSY);

	return pd->device->dealloc_pd(pd);
}
Example #2
/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);    /* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
Example #3
int
sysctl_l2t(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct l2t_data *l2t = sc->l2t;
	struct l2t_entry *e;
	struct sbuf *sb;
	int rc, i, header = 0;
	char ip[INET6_ADDRSTRLEN];

	if (l2t == NULL)
		return (ENXIO);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	e = &l2t->l2tab[0];
	for (i = 0; i < l2t->l2t_size; i++, e++) {
		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_UNUSED)
			goto skip;

		if (header == 0) {
			sbuf_printf(sb, " Idx IP address      "
			    "Ethernet address  VLAN/P LP State Users Port");
			header = 1;
		}
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = 0;
		else {
			inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],
			    &ip[0], sizeof(ip));
		}

		/*
		 * XXX: e->ifp may not be around.
		 * XXX: IPv6 addresses may not align properly in the output.
		 */
		sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
			   " %u %2u   %c   %5u %s",
			   e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
			   e->dmac[3], e->dmac[4], e->dmac[5],
			   e->vlan & 0xfff, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_load_acq_int(&e->refcnt),
			   e->ifp->if_xname);
skip:
		mtx_unlock(&e->lock);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
Example #4
void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{

	if (obj == NULL ||
	    atomic_load_acq_int(&obj->handle_count) == 0)
		return;

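	/* Drop one handle reference; the last reference frees the handle state. */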
	if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
		drm_gem_object_handle_free(obj);
	drm_gem_object_unreference_unlocked(obj);
}
Example #5
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (((1 << i) & map) != 0)
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

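	/*
	 * With no teardown barrier, wait until every CPU has finished the
	 * action phase before dropping the lock, so the smp_rv_* globals
	 * are not reused while still in use.
	 */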
	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #6
void
AcpiOsWaitEventsComplete(void)
{
	int i;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

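	/* Drain every task that is still flagged as enqueued. */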
	for (i = 0; i < acpi_max_tasks; i++)
		if ((atomic_load_acq_int(&acpi_tasks[i].at_flag) &
		    ACPI_TASK_ENQUEUED) != 0)
			taskqueue_drain(acpi_taskq, &acpi_tasks[i].at_task);
	return_VOID;
}
Example #7
/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
struct l2t_entry *
t4_alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);

	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

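	/* Nothing free past the rover; wrap around and scan from the start. */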
	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_subtract_int(&d->nfree, 1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}
Example #8
/*
 * Query from the BPF framework regarding whether the buffer currently in the
 * held position can be moved to the free position; the user process signals
 * this by setting its generation number equal to the kernel generation
 * number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
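	/* Free once the user generation has caught up with the kernel's. */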
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}
Example #9
/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from the
 * IPI handler.
 */
inline static int
doconfigtimer(int i)
{
	tc *conf;

	conf = DPCPU_PTR(configtimer);
	if (atomic_load_acq_int(*conf + i)) {
		if (i == 0 ? timer1hz : timer2hz)
			et_start(timer[i], NULL, &timerperiod[i]);
		else
			et_stop(timer[i]);
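		/* Clear the flag with a release store to publish completion. */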
		atomic_store_rel_int(*conf + i, 0);
		return (1);
	}
	return (0);
}
Example #10
/*
 * Called when the host's ARP layer makes a change to some entry that is loaded
 * into the HW L2 table.
 */
void
t4_l2_update(struct toedev *tod, struct ifnet *ifp, struct sockaddr *sa,
    uint8_t *lladdr, uint16_t vtag)
{
	struct adapter *sc = tod->tod_softc;
	struct l2t_entry *e;
	struct l2t_data *d = sc->l2t;
	u_int hash;

	KASSERT(d != NULL, ("%s: no L2 table", __func__));

	hash = l2_hash(d, sa, ifp->if_index);
	rw_rlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
			mtx_lock(&e->lock);
			if (atomic_load_acq_int(&e->refcnt))
				goto found;
			e->state = L2T_STATE_STALE;
			mtx_unlock(&e->lock);
			break;
		}
	}
	rw_runlock(&d->lock);

	/*
	 * This is of no interest to us.  We've never had an offloaded
	 * connection to this destination, and we aren't attempting one right
	 * now.
	 */
	return;

found:
	rw_runlock(&d->lock);

	KASSERT(e->state != L2T_STATE_UNUSED,
	    ("%s: unused entry in the hash.", __func__));

	update_entry(sc, e, lladdr, vtag);
	mtx_unlock(&e->lock);
}
Example #11
static void
EcGpeQueryHandler(void *Context)
{
    struct acpi_ec_softc *sc = (struct acpi_ec_softc *)Context;
    int pending;

    KASSERT(Context != NULL, ("EcGpeQueryHandler called with NULL"));

    do {
	/* Read the current pending count */
	pending = atomic_load_acq_int(&sc->ec_sci_pend);

	/* Call GPE handler function */
	EcGpeQueryHandlerSub(sc);

	/*
	 * Try to reset the pending count to zero. If this fails we
	 * know another GPE event has occurred while handling the
	 * current GPE event and need to loop.
	 */
    } while (!atomic_cmpset_int(&sc->ec_sci_pend, pending, 0));
}
Example #12
/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure theirs.
 */
static void
configtimer(int i)
{
#ifdef SMP
	tc *conf;
	int cpu;

	critical_enter();
#endif
	/* Start/stop global timer or per-CPU timer of this CPU. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
#ifdef SMP
	if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		conf = DPCPU_ID_PTR(cpu, configtimer);
		atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
	}
	/* Send reconfigure IPI. */
	ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
	/* Wait for reconfiguration completed. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		conf = DPCPU_ID_PTR(cpu, configtimer);
		if (atomic_load_acq_int(*conf + i))
			goto restart;
	}
	critical_exit();
#endif
}
Example #13
static struct l2t_entry *
find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else if (e->state == L2T_STATE_SWITCHING &&
		    memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
		    e->vlan == vlan && e->lport == port)
			return (e);	/* Found existing entry that matches. */
	}

	if (first_free == NULL)
		return (NULL);	/* No match and no room for a new entry. */

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	e = first_free;
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}
	e->state = L2T_STATE_UNUSED;
	return (e);
}
Example #14
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
#ifndef INTRNG
	int start = 0, end = 0;
#endif
	uint32_t actlr_mask, actlr_set;

	pmap_set_tex();
	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the queue, so it should not be executed in
	 * parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if __ARM_ARCH >= 6 && defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

#ifndef INTRNG
	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
#endif /* INTRNG */
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example #15
void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #16
static int atomic_get(sp_counted_base_atomic_type volatile *pw)
{
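    /* Acquire load: writes made before the matching release are visible. */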
    return atomic_load_acq_int(&pw->ui);
}
Example #17
static bool vm_is_paused(struct vm *vm) {
	return atomic_load_acq_int(&vm->hv_is_paused) == TRUE;
}