Example #1
void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}
Example #2
void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe cf;
	uint32_t pending;
	int ipl;
#ifdef DIAGNOSTIC
	const int mtx_count = ci->ci_mtx_count;
	const u_int biglock_count = ci->ci_biglock_count;
	const u_int blcnt = curlwp->l_blcnt;
#endif
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		splx(ipl);	/* lower to interrupt level */
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

		KASSERTMSG(ci->ci_cpl == ipl,
		    "%s: cpl (%d) != ipl (%d)", __func__, ci->ci_cpl, ipl);
		KASSERT(pending != 0);

		cf.pc = pc;
		cf.sr = status;
		cf.intr = (ci->ci_idepth > 1);

#ifdef MIPS3_ENABLE_CLOCK_INTR
		if (pending & MIPS_INT_MASK_5) {
			KASSERTMSG(ipl == IPL_SCHED,
			    "%s: ipl (%d) != IPL_SCHED (%d)",
			     __func__, ipl, IPL_SCHED);
			/* call the common MIPS3 clock interrupt handler */ 
			mips3_clockintr(&cf);
			pending ^= MIPS_INT_MASK_5;
		}
#endif

		if (pending != 0) {
			/* Process I/O and error interrupts. */
			evbmips_iointr(ipl, pc, pending);
		}
		KASSERT(biglock_count == ci->ci_biglock_count);
		KASSERT(blcnt == curlwp->l_blcnt);
		KASSERT(mtx_count == ci->ci_mtx_count);

	/*
	 * Even if our spl is higher now (due to interrupting while a
	 * spin-lock was held and a higher-IPL spin-lock was taken),
	 * that lock can no longer be held here, so it's safe to lower
	 * the IPL back to ppl.
	 */
		(void) splhigh();	/* disable interrupts */
	}

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
Example #3
static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}
Example #4
void
splx(int savedipl)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(savedipl < NIPL);

	if (__predict_false(savedipl == ci->ci_cpl)) {
		return;
	}

	register_t psw = cpsid(I32_bit);
	KASSERTMSG(panicstr != NULL || savedipl < ci->ci_cpl,
	    "splx(%d) to a higher ipl than %d", savedipl, ci->ci_cpl);

	ci->ci_intr_depth++;
	pic_do_pending_ints(psw, savedipl, NULL);
	ci->ci_intr_depth--;
	KASSERTMSG(ci->ci_cpl == savedipl, "cpl %d savedipl %d",
	    ci->ci_cpl, savedipl);
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
	cpu_dosoftints();
	KASSERTMSG(ci->ci_cpl == savedipl, "cpl %d savedipl %d",
	    ci->ci_cpl, savedipl);
}
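For reference, the caller-side idiom that pairs with this splx(): raise the priority with an spl*() call, keep the returned level, and hand it back to splx() on the way out. A minimal sketch, assuming code that protects state at IPL_VM (the counter and function names are hypothetical):

static int hypo_counter;		/* hypothetical state shared with interrupts */

static void
hypo_bump(void)
{
	const int s = splvm();		/* raise to IPL_VM, saving the old level */
	hypo_counter++;
	splx(s);			/* restore the level saved above */
}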
Example #5
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
#ifdef _LP64
	const paddr_t high = mips_avail_end;
#else
	const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * Don't allocate a direct-mapped uarea if we aren't allocating
	 * for a system lwp and we have memory that can't be mapped via
	 * KSEG0.
	 */
	if (!system && high > mips_avail_end)
		return NULL;
#endif
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, mips_avail_start, high,
	    USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
#ifdef _LP64
		if (!system)
			return NULL;
#endif
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= mips_avail_start,
	    "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")",
	     pa, mips_avail_start);
	KASSERTMSG(pa < mips_avail_end,
	    "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")",
	     pa, mips_avail_end);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */
#ifdef _LP64
	const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
#endif

	return (void *)va;
}
Example #6
/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	atomic_inc_uint(&mo->mo_refcnt);
}
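A minimal caller sketch of the contract in the comment above: the caller already owns a reference, takes another on behalf of a new user, and only then publishes the pointer. The consumer structure and names are hypothetical:

struct hypo_consumer {
	kmutex_t *hc_lock;		/* shared lock object */
};

static void
hypo_consumer_init(struct hypo_consumer *hc, kmutex_t *lock)
{
	/* We hold a reference to 'lock' across this call. */
	mutex_obj_hold(lock);		/* take a reference for 'hc' */
	hc->hc_lock = lock;
}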
Example #7
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}
Example #8
/*
 * Attach a statically-initialized event.  The type and string pointers
 * are already set up.
 */
void
evcnt_attach_static(struct evcnt *ev)
{
	int len;

	KASSERTMSG(init_done,
	    "%s: evcnt non initialized: group=<%s> name=<%s>",
	    __func__, ev->ev_group, ev->ev_name);

	len = strlen(ev->ev_group);
#ifdef DIAGNOSTIC
	if (len == 0 || len >= EVCNT_STRING_MAX) /* ..._MAX includes NUL */
		panic("evcnt_attach_static: group length (%s)", ev->ev_group);
#endif
	ev->ev_grouplen = len;

	len = strlen(ev->ev_name);
#ifdef DIAGNOSTIC
	if (len == 0 || len >= EVCNT_STRING_MAX) /* ..._MAX includes NUL */
		panic("evcnt_attach_static: name length (%s)", ev->ev_name);
#endif
	ev->ev_namelen = len;

	mutex_enter(&evcnt_lock);
	TAILQ_INSERT_TAIL(&allevents, ev, ev_list);
	mutex_exit(&evcnt_lock);
}
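A minimal sketch of the static setup this function expects, assuming the standard EVCNT_INITIALIZER() macro from evcnt(9); the device and counter names are hypothetical:

static struct evcnt hypo_intr_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "hypodev", "intr");

static void
hypo_attach_events(void)
{
	/* The group and name strings were set up by the initializer. */
	evcnt_attach_static(&hypo_intr_ev);
}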
Example #9
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
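The ffs() loop above visits each pending bit from least to most significant, clearing each as it goes; for pending = 0x28 it visits irq 3 and then irq 5. A standalone sketch of the same loop shape, assuming NetBSD's __BIT() macro (handle_one() is hypothetical):

static void handle_one(int irq);	/* hypothetical per-irq action */

static void
scan_pending(uint32_t pending)
{
	for (;;) {
		int irq = ffs(pending);	/* 1-based index of the lowest set bit */
		if (irq-- == 0)		/* zero means no bits left */
			return;
		handle_one(irq);
		pending &= ~__BIT(irq);	/* clear this bit before rescanning */
	}
}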
Example #10
/*
 * Set the starting transmit rate for a node.
 */
static void
ath_rate_ctl_start(struct ath_softc *sc, struct ieee80211_node *ni)
{
#define	RATE(_ix)	(ni->ni_rates.rs_rates[(_ix)] & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = &sc->sc_ic;
	int srate;

	KASSERTMSG(ni->ni_rates.rs_nrates > 0, "no rates");
	if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
		/*
		 * No fixed rate is requested. For 11b start with
		 * the highest negotiated rate; otherwise, for 11g
		 * and 11a, we start "in the middle" at 24Mb or 36Mb.
		 */
		srate = ni->ni_rates.rs_nrates - 1;
		if (sc->sc_curmode != IEEE80211_MODE_11B) {
			/*
			 * Scan the negotiated rate set to find the
			 * closest rate.
			 */
			/* NB: the rate set is assumed sorted */
			for (; srate >= 0 && RATE(srate) > 72; srate--)
				;
			KASSERTMSG(srate >= 0, "bogus rate set");
		}
	} else {
		/*
		 * A fixed rate is to be used; ic_fixed_rate is an
		 * index into the supported rate set.  Convert this
		 * to the index into the negotiated rate set for
		 * the node.  We know the rate is there because the
		 * rate set is checked when the station associates.
		 */
		const struct ieee80211_rateset *rs =
			&ic->ic_sup_rates[ic->ic_curmode];
		int r = rs->rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* NB: the rate set is assumed sorted */
		srate = ni->ni_rates.rs_nrates - 1;
		for (; srate >= 0 && RATE(srate) != r; srate--)
			;
		KASSERTMSG(srate >= 0,
			"fixed rate %d not in rate set", ic->ic_fixed_rate);
	}
	ath_rate_update(sc, ni, srate);
#undef RATE
}
Example #11
/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}
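Together with Example #6, this reference counting supports a simple shared-lock lifecycle; a sketch assuming the standard mutex_obj_alloc() allocator from mutex(9):

static void
hypo_lifecycle(void)
{
	/* One reference, owned by the creator. */
	kmutex_t *lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	mutex_obj_hold(lock);		/* second user: refcnt 1 -> 2 */

	if (mutex_obj_free(lock))	/* refcnt 2 -> 1, returns false */
		panic("lock freed with a reference outstanding");
	if (!mutex_obj_free(lock))	/* refcnt 1 -> 0, destroys: true */
		panic("last reference did not free the lock");
}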
Example #12
static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}
Example #13
static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}
Example #14
/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci;
	uint64_t where;
	int s;

	/*
	 * Caller should have re-checked if there is any state to manage.
	 * Block the interrupts and inspect again, since cross-call sent
	 * by remote CPU could have changed the state.
	 */
	s = splsoftclock();
	ci = l->l_pcu_cpu[id];
	if (ci == curcpu()) {
		/*
		 * State is on the current CPU - just perform the operations.
		 */
		KASSERT((flags & PCU_CLAIM) == 0);
		KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
		    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
		     __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
		pcu_do_op(pcu, l, flags);
		splx(s);
		return;
	}

	if (__predict_false(ci == NULL)) {
		if (flags & PCU_CLAIM) {
			pcu_do_op(pcu, l, flags);
		}
		/* Cross-call has won the race - no state to manage. */
		splx(s);
		return;
	}

	splx(s);

	/*
	 * State is on the remote CPU - perform the operations there.
	 * Note: there is a race condition; see the description at the top.
	 */
	where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
	    __UNCONST(pcu), (void *)(uintptr_t)flags, ci);
	xc_wait(where);

	KASSERT((flags & PCU_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}
Example #15
void
pcu_switchpoint(lwp_t *l)
{
	const uint32_t pcu_kernel_inuse = l->l_pcu_used[PCU_KERNEL];
	uint32_t pcu_user_inuse = l->l_pcu_used[PCU_USER];
	/* int s; */

	KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);

	if (__predict_false(pcu_kernel_inuse != 0)) {
		for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
			if ((pcu_kernel_inuse & (1 << id)) == 0) {
				continue;
			}
			struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
			if (pcu_ci == NULL || pcu_ci == l->l_cpu) {
				continue;
			}
			const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
			/*
			 * Steal the PCU away from the current owner and
			 * take ownership of it.
			 */
			pcu_cpu_op(pcu, PCU_SAVE | PCU_RELEASE);
			pcu_do_op(pcu, l, PCU_KERNEL | PCU_CLAIM | PCU_RELOAD);
			pcu_user_inuse &= ~(1 << id);
		}
	}

	if (__predict_true(pcu_user_inuse == 0)) {
		/* PCUs are not in use. */
		return;
	}
	/* commented out as we know we are already at IPL_SCHED */
	/* s = splsoftclock(); */
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_user_inuse & (1 << id)) == 0) {
			continue;
		}
		struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
		if (pcu_ci == NULL || pcu_ci == l->l_cpu) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		pcu->pcu_state_release(l, 0);
	}
	/* splx(s); */
}
Example #16
/*
 * callout_destroy:
 *
 *	Destroy a callout structure.  The callout must be stopped.
 */
void
callout_destroy(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;

	/*
	 * It's not necessary to lock in order to see the correct value
	 * of c->c_flags.  If the callout could potentially have been
	 * running, the current thread should have stopped it.
	 */
	KASSERT((c->c_flags & CALLOUT_PENDING) == 0);
	KASSERT(c->c_cpu->cc_lwp == curlwp || c->c_cpu->cc_active != c);
	KASSERTMSG(c->c_magic == CALLOUT_MAGIC,
	    "callout %p: c_magic (%#x) != CALLOUT_MAGIC (%#x)",
	    c, c->c_magic, CALLOUT_MAGIC);
	c->c_magic = 0;
}
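The "must be stopped" requirement implies a teardown ordering on the detach path; a minimal sketch using the standard callout(9) calls (the softc and handler names are hypothetical):

struct hypo_softc {
	callout_t sc_tick;		/* periodic timer */
};

static void
hypo_detach(struct hypo_softc *sc)
{
	/* Stop the callout and wait for a running handler to finish... */
	callout_halt(&sc->sc_tick, NULL);
	/* ...so CALLOUT_PENDING is clear and the callout is inactive here. */
	callout_destroy(&sc->sc_tick);
}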
Example #17
static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}
Example #18
static int
mpu_current_frequency_sysctl_helper(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int freq = mpu_frequency;
	int old_freq = freq;

	KASSERTMSG(curcpu()->ci_data.cpu_cc_freq == mpu_frequency * 1000000,
	    "cc_freq %"PRIu64" mpu_freq %u000000",
	    curcpu()->ci_data.cpu_cc_freq, mpu_frequency);

	node.sysctl_data = &freq;

	int error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	KASSERT(old_freq == mpu_frequency);

	error = EINVAL;
	for (size_t i = 0; i < __arraycount(mpu_frequencies); i++) {
		if (mpu_frequencies[i] == freq) {
			error = 0;
			break;
		}
	}
	if (error)
		return EINVAL;

	if (freq != old_freq) {
		const int s = splhigh();
		prcm_mpu_pll_config(freq);
		curcpu()->ci_data.cpu_cc_freq = freq * 1000000;
		mpu_frequency = freq;
		splx(s);
		aprint_normal_dev(curcpu()->ci_dev,
		    "frequency changed from %d MHz to %d MHz\n",
		    old_freq, freq);
		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}
Example #19
int
process_read_fpregs(struct lwp *l, struct fpreg *fpregs, size_t *sz)
{
	struct pcb * const pcb = lwp_getpcb(l);

	if (l == curlwp) {
		/* Is the process using the fpu? */
		if (!fpu_valid_p()) {
			memset(fpregs, 0, sizeof (*fpregs));
			return 0;
		}
		fpu_save();
	} else {
		KASSERTMSG(l->l_pcu_cpu[PCU_FPU] == NULL,
		    "%s: FPU of l (%p) active on %s",
		     __func__, l, l->l_pcu_cpu[PCU_FPU]->ci_cpuname);
	}
	*fpregs = pcb->pcb_fpregs;
	return 0;
}
Example #20
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
	while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pic_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pic_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(psw, ipl, frame);
			pic_list_unblock_irqs();
		}
	}
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
Example #21
/**
 * The code below assumes that we are dealing with hardware multi rate retry
 * I have no idea what will happen if you try to use this module with another
 * type of hardware. Your machine might catch fire or it might work with
 * horrible performance...
 */
static void
ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
{
	struct ath_node *an = ATH_NODE(ni);
	struct amrr_node *amn = ATH_NODE_AMRR(an);
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int8_t rix;

	KASSERTMSG(rt != NULL, "no rate table, mode %u", sc->sc_curmode);

	DPRINTF(sc, "%s: set xmit rate for %s to %dM\n",
	    __func__, ether_sprintf(ni->ni_macaddr),
	    ni->ni_rates.rs_nrates > 0 ?
		(ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL) / 2 : 0);

	ni->ni_txrate = rate;
	/*
	 * Before associating a node has no rate set setup
	 * so we can't calculate any transmit codes to use.
	 * This is ok since we should never be sending anything
	 * but management frames and those always go at the
	 * lowest hardware rate.
	 */
	if (ni->ni_rates.rs_nrates > 0) {
		amn->amn_tx_rix0 = sc->sc_rixmap[
					       ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL];
		amn->amn_tx_rate0 = rt->info[amn->amn_tx_rix0].rateCode;
		amn->amn_tx_rate0sp = amn->amn_tx_rate0 |
			rt->info[amn->amn_tx_rix0].shortPreamble;
		if (sc->sc_mrretry) {
			amn->amn_tx_try0 = 1;
			amn->amn_tx_try1 = 1;
			amn->amn_tx_try2 = 1;
			amn->amn_tx_try3 = 1;
			if (--rate >= 0) {
				rix = sc->sc_rixmap[
						    ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
				amn->amn_tx_rate1 = rt->info[rix].rateCode;
				amn->amn_tx_rate1sp = amn->amn_tx_rate1 |
					rt->info[rix].shortPreamble;
			} else {
				amn->amn_tx_rate1 = amn->amn_tx_rate1sp = 0;
			}
			if (--rate >= 0) {
				rix = sc->sc_rixmap[
						    ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
				amn->amn_tx_rate2 = rt->info[rix].rateCode;
				amn->amn_tx_rate2sp = amn->amn_tx_rate2 |
					rt->info[rix].shortPreamble;
			} else {
				amn->amn_tx_rate2 = amn->amn_tx_rate2sp = 0;
			}
			if (rate > 0) {
				/* NB: only do this if we didn't already do it above */
				amn->amn_tx_rate3 = rt->info[0].rateCode;
				amn->amn_tx_rate3sp =
					amn->amn_tx_rate3 | rt->info[0].shortPreamble;
			} else {
				amn->amn_tx_rate3 = amn->amn_tx_rate3sp = 0;
			}
		} else {
			amn->amn_tx_try0 = ATH_TXMAXTRY;
			/*
			 * Theoretically, these statements are useless,
			 * because the code which uses them tests for
			 * amn_tx_try0 == ATH_TXMAXTRY.
			 */
			amn->amn_tx_try1 = 0;
			amn->amn_tx_try2 = 0;
			amn->amn_tx_try3 = 0;
			amn->amn_tx_rate1 = amn->amn_tx_rate1sp = 0;
			amn->amn_tx_rate2 = amn->amn_tx_rate2sp = 0;
			amn->amn_tx_rate3 = amn->amn_tx_rate3sp = 0;
		}
	}
	node_reset(amn);
}
Example #22
/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it will start out in lwp_trampoline()
 * and call child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);

	l2->l_md.md_ss_addr = 0;
	l2->l_md.md_ss_instr = 0;
	l2->l_md.md_astpending = 0;

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from parent, so that return to userspace
	 * will be to right address, with correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;

	/* If specified, set a different user stack for a child. */
	if (stack != NULL)
		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;

	l2->l_md.md_utf = tf;
#if (USPACE > PAGE_SIZE) || !defined(_LP64)
	CTASSERT(__arraycount(l2->l_md.md_upte) >= UPAGES);
	for (u_int i = 0; i < __arraycount(l2->l_md.md_upte); i++) {
		l2->l_md.md_upte[i] = 0;
	}
	if (!pmap_md_direct_mapped_vaddr_p(ua2)) {
		CTASSERT((PGSHIFT == 12) == (UPAGES == 2));
		pt_entry_t * const pte = pmap_pte_lookup(pmap_kernel(), ua2);
		const uint32_t x = MIPS_HAS_R4K_MMU
		    ? (MIPS3_PG_RO | MIPS3_PG_WIRED)
		    : 0;

		for (u_int i = 0; i < UPAGES; i++) {
			KASSERT(pte_valid_p(pte[i]));
			l2->l_md.md_upte[i] = pte[i] & ~x;
		}
	}
#else
	KASSERT(pmap_md_direct_mapped_vaddr_p(ua2));
#endif
	/*
	 * Rig kernel stack so that it would start out in lwp_trampoline()
	 * and call child_return() with l as an argument.  This causes the
	 * newly-created child process to go directly to user level with a
	 * parent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */

	pcb2->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
	pcb2->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
	pcb2->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l2;	/* T8 */
	pcb2->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
	pcb2->pcb_context.val[_L_RA] =
	   mips_locore_jumpvec.ljv_lwp_trampoline;			/* RA */
#if defined(_LP64) || defined(__mips_n32)
	KASSERT(tf->tf_regs[_R_SR] & MIPS_SR_KX);
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_KX);
#endif
	KASSERTMSG(pcb2->pcb_context.val[_L_SR] & MIPS_SR_INT_IE,
	    "%d.%d %#"PRIxREGISTER,
	    l1->l_proc->p_pid, l1->l_lid,
	    pcb2->pcb_context.val[_L_SR]);
}
Example #23
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and 
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}
Example #24
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	pmap_devmap_register(devmap);
	awin_bootstrap(AWIN_CORE_VBASE, CONADDR_VA);

	/* Heads up ... Setup the CPU / MMU / TLB functions. */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/* The console is going to try to map things.  Give pmap a devmap. */
	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("\nuboot arg = %#x, %#x, %#x, %#x\n",
	    uboot_args[0], uboot_args[1], uboot_args[2], uboot_args[3]);
#endif

#ifdef KGDB
	kgdb_port_init();
#endif

	cpu_reset_address = awin_wdog_reset;

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (cubie) booting ...\n");
#endif

#ifdef BOOT_ARGS
	char mi_bootargs[] = BOOT_ARGS;
	parse_mi_bootargs(mi_bootargs);
#endif

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");

#if defined(CPU_CORTEXA7) || defined(CPU_CORTEXA9) || defined(CPU_CORTEXA15)
	printf("initarm: cbar=%#x\n", armreg_cbar_read());
#endif
#endif

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	psize_t ram_size = awin_memprobe();

	/*
	 * If MEMSIZE specified less than what we really have, limit ourselves
	 * to that.
	 */
#ifdef MEMSIZE
	if (ram_size == 0 || ram_size > (unsigned)MEMSIZE * 1024 * 1024)
		ram_size = (unsigned)MEMSIZE * 1024 * 1024;
#else
	KASSERTMSG(ram_size > 0, "RAM size unknown and MEMSIZE undefined");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = AWIN_SDRAM_PBASE;
	bootconfig.dram[0].pages = ram_size / PAGE_SIZE;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	const bool mapallmem_p = true;
	KASSERT(ram_size <= KERNEL_VM_BASE - KERNEL_BASE);
#else
	const bool mapallmem_p = false;
#endif
	KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);

	arm32_bootmem_init(bootconfig.dram[0].address, ram_size,
	    KERNEL_BASE_PHYS);
	arm32_kernel_vm_init(KERNEL_VM_BASE, ARM_VECTORS_LOW, 0, devmap,
	    mapallmem_p);

	if (mapallmem_p) {
		/*
		 * The "bootargs" env variable is passed as the 4th
		 * argument to the kernel, but it uses a physical address
		 * and we need to convert that to a virtual address.
		 */
		if (uboot_args[3] - AWIN_SDRAM_PBASE < ram_size) {
			const char * const args = (const char *)
			     (uboot_args[3] + KERNEL_PHYS_VOFFSET);
			strlcpy(bootargs, args, sizeof(bootargs));
		}
	}

	boot_args = bootargs;
	parse_mi_bootargs(boot_args);

	/* we've a specific device_register routine */
	evbarm_device_register = cubie_device_register;

#if NAWIN_FB > 0
	char *ptr;
	if (get_bootconf_option(boot_args, "console",
		    BOOTOPT_TYPE_STRING, &ptr) && strncmp(ptr, "fb", 2) == 0) {
		use_fb_console = true;
	}
#endif
	
	return initarm_common(KERNEL_VM_BASE, KERNEL_VM_SIZE, NULL, 0);
}
Example #25
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the cpu calls idle_loop
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}
Example #26
/*
 * Initialize the tables for a node.
 */
static void
ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
{
#define	RATE(_ix)	(ni->ni_rates.rs_rates[(_ix)] & IEEE80211_RATE_VAL)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_node *an = ATH_NODE(ni);
	struct sample_node *sn = ATH_NODE_SAMPLE(an);
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int x, y, srate;

	KASSERTMSG(rt != NULL, "no rate table, mode %u", sc->sc_curmode);
	sn->static_rate_ndx = -1;
	if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
		/*
		 * A fixed rate is to be used; ic_fixed_rate is an
		 * index into the supported rate set.  Convert this
		 * to the index into the negotiated rate set for
		 * the node.  We know the rate is there because the
		 * rate set is checked when the station associates.
		 */
		const struct ieee80211_rateset *rs =
			&ic->ic_sup_rates[ic->ic_curmode];
		int r = rs->rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* NB: the rate set is assumed sorted */
		srate = ni->ni_rates.rs_nrates - 1;
		for (; srate >= 0 && RATE(srate) != r; srate--)
			;
		KASSERTMSG(srate >= 0,
			"fixed rate %d not in rate set", ic->ic_fixed_rate);
		sn->static_rate_ndx = srate;
	}

        DPRINTF(sc, "%s: %s size 1600 rate/tt", __func__, ether_sprintf(ni->ni_macaddr));

	sn->num_rates = ni->ni_rates.rs_nrates;
	for (x = 0; x < ni->ni_rates.rs_nrates; x++) {
		sn->rates[x].rate = ni->ni_rates.rs_rates[x] & IEEE80211_RATE_VAL;
		sn->rates[x].rix = sc->sc_rixmap[sn->rates[x].rate];
		sn->rates[x].rateCode = rt->info[sn->rates[x].rix].rateCode;
		sn->rates[x].shortPreambleRateCode = 
			rt->info[sn->rates[x].rix].rateCode | 
			rt->info[sn->rates[x].rix].shortPreamble;

		DPRINTF(sc, " %d/%d", sn->rates[x].rate,
			calc_usecs_unicast_packet(sc, 1600, sn->rates[x].rix, 
						  0,0));
	}
	DPRINTF(sc, "%s\n", "");
	
	/* set the visible bit-rate to the lowest one available */
	ni->ni_txrate = 0;
	sn->num_rates = ni->ni_rates.rs_nrates;
	
	for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
		int size = bin_to_size(y);
		int ndx = 0;
		sn->packets_sent[y] = 0;
		sn->current_sample_ndx[y] = -1;
		sn->last_sample_ndx[y] = 0;
		
		for (x = 0; x < ni->ni_rates.rs_nrates; x++) {
			sn->stats[y][x].successive_failures = 0;
			sn->stats[y][x].tries = 0;
			sn->stats[y][x].total_packets = 0;
			sn->stats[y][x].packets_acked = 0;
			sn->stats[y][x].last_tx = 0;
			
			sn->stats[y][x].perfect_tx_time = 
				calc_usecs_unicast_packet(sc, size, 
							  sn->rates[x].rix,
							  0, 0);
			sn->stats[y][x].average_tx_time = sn->stats[y][x].perfect_tx_time;
		}

		/* set the initial rate */
		for (ndx = sn->num_rates-1; ndx > 0; ndx--) {
			if (sn->rates[ndx].rate <= 72) {
				break;
			}
		}
		sn->current_rate[y] = ndx;
	}

	DPRINTF(sc, "%s: %s %d rates %d%sMbps (%dus)- %d%sMbps (%dus)\n",
		__func__, ether_sprintf(ni->ni_macaddr), 
		sn->num_rates,
		sn->rates[0].rate/2, sn->rates[0].rate & 0x1 ? ".5" : "",
		sn->stats[1][0].perfect_tx_time,
		sn->rates[sn->num_rates-1].rate/2,
			sn->rates[sn->num_rates-1].rate & 0x1 ? ".5" : "",
		sn->stats[1][sn->num_rates-1].perfect_tx_time
	);

	ni->ni_txrate = sn->current_rate[0];
#undef RATE
}
Example #27
/*
 * Interrupt handler allocation utility. This function calls each allocation
 * function as specified by arguments.
 * Currently callee functions are pci_intx_alloc(), pci_msi_alloc_exact(),
 * and pci_msix_alloc_exact().
 * pa       : pci_attach_args
 * ihps     : interrupt handlers
 * counts   : The array of number of required interrupt handlers.
 *            It is overwritten by allocated the number of handlers.
 *            CAUTION: The size of counts[] must be PCI_INTR_TYPE_SIZE.
 * max_type : "max" type of using interrupts. See below.
 *     e.g.
 *         If you want to use 5 MSI-X, 1 MSI, or INTx, you use "counts" as
 *             int counts[PCI_INTR_TYPE_SIZE];
 *             counts[PCI_INTR_TYPE_MSIX] = 5;
 *             counts[PCI_INTR_TYPE_MSI] = 1;
 *             counts[PCI_INTR_TYPE_INTX] = 1;
 *             error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSIX);
 *
 *         If you want to use hardware max number MSI-X or 1 MSI,
 *         and not to use INTx, you use "counts" as
 *             int counts[PCI_INTR_TYPE_SIZE];
 *             counts[PCI_INTR_TYPE_MSIX] = -1;
 *             counts[PCI_INTR_TYPE_MSI] = 1;
 *             counts[PCI_INTR_TYPE_INTX] = 0;
 *             error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSIX);
 *
 *         If you want to use 3 MSI or INTx, you can use "counts" as
 *             int counts[PCI_INTR_TYPE_SIZE];
 *             counts[PCI_INTR_TYPE_MSI] = 3;
 *             counts[PCI_INTR_TYPE_INTX] = 1;
 *             error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSI);
 *
 *         If you want to use 1 MSI or INTx (probably most general usage),
 *         you can simply use this API like
 *         below
 *             error = pci_intr_alloc(pa, ihps, NULL, 0);
 *                                                    ^ ignored
 */
int
pci_intr_alloc(const struct pci_attach_args *pa, pci_intr_handle_t **ihps,
    int *counts, pci_intr_type_t max_type)
{
	int error;
	int intx_count, msi_count, msix_count;

	intx_count = msi_count = msix_count = 0;
	if (counts == NULL) { /* simple pattern */
		msi_count = 1;
		intx_count = 1;
	} else {
		switch(max_type) {
		case PCI_INTR_TYPE_MSIX:
			msix_count = counts[PCI_INTR_TYPE_MSIX];
			/* FALLTHROUGH */
		case PCI_INTR_TYPE_MSI:
			msi_count = counts[PCI_INTR_TYPE_MSI];
			/* FALLTHROUGH */
		case PCI_INTR_TYPE_INTX:
			intx_count = counts[PCI_INTR_TYPE_INTX];
			break;
		default:
			return EINVAL;
		}
	}

	if (counts != NULL)
		memset(counts, 0, sizeof(counts[0]) * PCI_INTR_TYPE_SIZE);
	error = EINVAL;

	/* try MSI-X */
	if (msix_count == -1) /* use hardware max */
		msix_count = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (msix_count > 0) {
		error = pci_msix_alloc_exact(pa, ihps, msix_count);
		if (error == 0) {
			KASSERTMSG(counts != NULL,
			    "If MSI-X is used, counts must not be NULL.");
			counts[PCI_INTR_TYPE_MSIX] = msix_count;
			goto out;
		}
	}

	/* try MSI */
	if (msi_count == -1) /* use hardware max */
		msi_count = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (msi_count > 0) {
		error = pci_msi_alloc_exact(pa, ihps, msi_count);
		if (error == 0) {
			if (counts != NULL)
				counts[PCI_INTR_TYPE_MSI] = msi_count;
			goto out;
		}
	}

	/* try INTx */
	if (intx_count != 0) { /* The number of INTx is always 1. */
		error = pci_intx_alloc(pa, ihps);
		if (error == 0) {
			if (counts != NULL)
				counts[PCI_INTR_TYPE_INTX] = 1;
		}
	}

 out:
	return error;
}
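An attach-side sketch of the first usage pattern from the comment above: ask for 5 MSI-X vectors, falling back to 1 MSI and then INTx. The wrapper name is hypothetical; the calls are those documented above:

static int
hypo_alloc_interrupts(const struct pci_attach_args *pa,
    pci_intr_handle_t **ihps)
{
	int counts[PCI_INTR_TYPE_SIZE];
	int error;

	counts[PCI_INTR_TYPE_MSIX] = 5;
	counts[PCI_INTR_TYPE_MSI] = 1;
	counts[PCI_INTR_TYPE_INTX] = 1;
	error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSIX);
	if (error)
		return error;
	/* On success, counts[] holds the number actually allocated per type. */
	return 0;
}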
Example #28
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	pmap_devmap_register(devmap);
	awin_bootstrap(AWIN_CORE_VBASE, CONADDR_VA);

	/* Heads up ... Setup the CPU / MMU / TLB functions. */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/* The console is going to try to map things.  Give pmap a devmap. */
	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("\nuboot arg = %#"PRIxPTR", %#"PRIxPTR", %#"PRIxPTR", %#"PRIxPTR"\n",
	    uboot_args[0], uboot_args[1], uboot_args[2], uboot_args[3]);
#endif

#ifdef KGDB
	kgdb_port_init();
#endif

	cpu_reset_address = awin_wdog_reset;

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (" BOARDTYPE ") booting ...\n");
#endif

#ifdef BOOT_ARGS
	char mi_bootargs[] = BOOT_ARGS;
	parse_mi_bootargs(mi_bootargs);
#endif

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");

#if defined(CPU_CORTEXA7) || defined(CPU_CORTEXA9) || defined(CPU_CORTEXA15)
	if (!CPU_ID_CORTEX_A8_P(curcpu()->ci_arm_cpuid)) {
		printf("initarm: cbar=%#x\n", armreg_cbar_read());
	}
#endif
#endif

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	psize_t ram_size = awin_memprobe();

#if AWIN_board == AWIN_cubieboard
	/* the cubietruck has 2GB whereas the cubieboards only has 1GB */
	cubietruck_p = (ram_size == 0x80000000);
#endif

	/*
	 * If MEMSIZE specified less than what we really have, limit ourselves
	 * to that.
	 */
#ifdef MEMSIZE
	if (ram_size == 0 || ram_size > (unsigned)MEMSIZE * 1024 * 1024)
		ram_size = (unsigned)MEMSIZE * 1024 * 1024;
#else
	KASSERTMSG(ram_size > 0, "RAM size unknown and MEMSIZE undefined");
#endif

	/*
	 * Configure DMA tags
	 */
	awin_dma_bootstrap(ram_size);

	/* Fake bootconfig structure for the benefit of pmap.c. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = AWIN_SDRAM_PBASE;
	bootconfig.dram[0].pages = ram_size / PAGE_SIZE;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	const bool mapallmem_p = true;
#ifndef PMAP_NEED_ALLOC_POOLPAGE
	if (ram_size > KERNEL_VM_BASE - KERNEL_BASE) {
		printf("%s: dropping RAM size from %luMB to %uMB\n",
		   __func__, (unsigned long) (ram_size >> 20),
		   (KERNEL_VM_BASE - KERNEL_BASE) >> 20);
		ram_size = KERNEL_VM_BASE - KERNEL_BASE;
	}
Example #29
static void
ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
{
	struct ath_node *an = ATH_NODE(ni);
	struct onoe_node *on = ATH_NODE_ONOE(an);
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int8_t rix;

	KASSERTMSG(rt != NULL, "no rate table, mode %u", sc->sc_curmode);

	DPRINTF(sc, "%s: set xmit rate for %s to %dM\n",
	    __func__, ether_sprintf(ni->ni_macaddr),
	    ni->ni_rates.rs_nrates > 0 ?
		(ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL) / 2 : 0);

	ni->ni_txrate = rate;
	/*
	 * Before associating a node has no rate set setup
	 * so we can't calculate any transmit codes to use.
	 * This is ok since we should never be sending anything
	 * but management frames and those always go at the
	 * lowest hardware rate.
	 */
	if (ni->ni_rates.rs_nrates == 0)
		goto done;
	on->on_tx_rix0 = sc->sc_rixmap[
		ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL];
	on->on_tx_rate0 = rt->info[on->on_tx_rix0].rateCode;
	
	on->on_tx_rate0sp = on->on_tx_rate0 |
		rt->info[on->on_tx_rix0].shortPreamble;
	if (sc->sc_mrretry) {
		/*
		 * Hardware supports multi-rate retry; setup two
		 * step-down retry rates and make the lowest rate
		 * be the ``last chance''.  We use 4, 2, 2, 2 tries
		 * respectively (4 is set here, the rest are fixed
		 * in the xmit routine).
		 */
		on->on_tx_try0 = 1 + 3;		/* 4 tries at rate 0 */
		if (--rate >= 0) {
			rix = sc->sc_rixmap[
				ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
			on->on_tx_rate1 = rt->info[rix].rateCode;
			on->on_tx_rate1sp = on->on_tx_rate1 |
				rt->info[rix].shortPreamble;
		} else {
			on->on_tx_rate1 = on->on_tx_rate1sp = 0;
		}
		if (--rate >= 0) {
			rix = sc->sc_rixmap[
				ni->ni_rates.rs_rates[rate]&IEEE80211_RATE_VAL];
			on->on_tx_rate2 = rt->info[rix].rateCode;
			on->on_tx_rate2sp = on->on_tx_rate2 |
				rt->info[rix].shortPreamble;
		} else {
			on->on_tx_rate2 = on->on_tx_rate2sp = 0;
		}
		if (rate > 0) {
			/* NB: only do this if we didn't already do it above */
			on->on_tx_rate3 = rt->info[0].rateCode;
			on->on_tx_rate3sp =
				on->on_tx_rate3 | rt->info[0].shortPreamble;
		} else {
			on->on_tx_rate3 = on->on_tx_rate3sp = 0;
		}
	} else {
		on->on_tx_try0 = ATH_TXMAXTRY;	/* max tries at rate 0 */
		on->on_tx_rate1 = on->on_tx_rate1sp = 0;
		on->on_tx_rate2 = on->on_tx_rate2sp = 0;
		on->on_tx_rate3 = on->on_tx_rate3sp = 0;
	}
done:
	on->on_tx_ok = on->on_tx_err = on->on_tx_retr = on->on_tx_upper = 0;
}
Example #30
void
ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
		  int shortPreamble, size_t frameLen,
		  u_int8_t *rix, int *try0, u_int8_t *txrate)
{
	struct sample_node *sn = ATH_NODE_SAMPLE(an);
	struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
	struct ieee80211com *ic = &sc->sc_ic;
	int ndx, size_bin, mrr, best_ndx, change_rates;
	unsigned average_tx_time;

	mrr = sc->sc_mrretry && !(ic->ic_flags & IEEE80211_F_USEPROT);
	size_bin = size_to_bin(frameLen);
	best_ndx = best_rate_ndx(sn, size_bin, !mrr);

	if (best_ndx >= 0) {
		average_tx_time = sn->stats[size_bin][best_ndx].average_tx_time;
	} else {
		average_tx_time = 0;
	}
	
	if (sn->static_rate_ndx != -1) {
		ndx = sn->static_rate_ndx;
		*try0 = ATH_TXMAXTRY;
	} else {
		*try0 = mrr ? 2 : ATH_TXMAXTRY;
		
		if (sn->sample_tt[size_bin] < average_tx_time * (sn->packets_since_sample[size_bin]*ssc->ath_sample_rate/100)) {
			/*
			 * we want to limit the time measuring the performance
			 * of other bit-rates to ath_sample_rate% of the
			 * total transmission time.
			 */
			ndx = pick_sample_ndx(sn, size_bin);
			if (ndx != sn->current_rate[size_bin]) {
				sn->current_sample_ndx[size_bin] = ndx;
			} else {
				sn->current_sample_ndx[size_bin] = -1;
			}
			sn->packets_since_sample[size_bin] = 0;

		} else {
			change_rates = 0;
			if (!sn->packets_sent[size_bin] || best_ndx == -1) {
				/* no packet has been sent successfully yet */
				for (ndx = sn->num_rates-1; ndx > 0; ndx--) {
					/* 
					 * pick the highest rate <= 36 Mbps
					 * that hasn't failed.
					 */
					if (sn->rates[ndx].rate <= 72 && 
					    sn->stats[size_bin][ndx].successive_failures == 0) {
						break;
					}
				}
				change_rates = 1;
				best_ndx = ndx;
			} else if (sn->packets_sent[size_bin] < 20) {
				/* let the bit-rate switch quickly during the first few packets */
				change_rates = 1;
			} else if (ticks - ((hz*MIN_SWITCH_MS)/1000) > sn->ticks_since_switch[size_bin]) {
				/* 2 seconds have gone by */
				change_rates = 1;
			} else if (average_tx_time * 2 < sn->stats[size_bin][sn->current_rate[size_bin]].average_tx_time) {
				/* the current bit-rate is twice as slow as the best one */
				change_rates = 1;
			}

			sn->packets_since_sample[size_bin]++;
			
			if (change_rates) {
				if (best_ndx != sn->current_rate[size_bin]) {
					DPRINTF(sc, "%s: %s size %d switch rate %d (%d/%d) -> %d (%d/%d) after %d packets mrr %d\n",
						__func__,
						ether_sprintf(an->an_node.ni_macaddr),
						packet_size_bins[size_bin],
						sn->rates[sn->current_rate[size_bin]].rate,
						sn->stats[size_bin][sn->current_rate[size_bin]].average_tx_time,
						sn->stats[size_bin][sn->current_rate[size_bin]].perfect_tx_time,
						sn->rates[best_ndx].rate,
						sn->stats[size_bin][best_ndx].average_tx_time,
						sn->stats[size_bin][best_ndx].perfect_tx_time,
						sn->packets_since_switch[size_bin],
						mrr);
				}
				sn->packets_since_switch[size_bin] = 0;
				sn->current_rate[size_bin] = best_ndx;
				sn->ticks_since_switch[size_bin] = ticks;
			}
			ndx = sn->current_rate[size_bin];
			sn->packets_since_switch[size_bin]++;
			if (size_bin == 0) {
			/*
			 * set the visible txrate for this node
			 * to the rate of small packets
			 */
				an->an_node.ni_txrate = ndx;
			}
		}
	}

	KASSERTMSG(ndx >= 0 && ndx < sn->num_rates, "ndx is %d", ndx);

	*rix = sn->rates[ndx].rix;
	if (shortPreamble) {
		*txrate = sn->rates[ndx].shortPreambleRateCode;
	} else {
		*txrate = sn->rates[ndx].rateCode;
	}
	sn->packets_sent[size_bin]++;
}