Example #1
File: qman.c Project: 2asoft/freebsd
t_Error
qman_fqr_register_cb(t_Handle fqr, t_QmReceivedFrameCallback *callback,
    t_Handle app)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_RegisterCB(fqr, callback, app);

	sched_unpin();

	return (error);
}
Example #2
File: qman.c Project: 2asoft/freebsd
uint32_t
qman_fqr_get_counter(t_Handle fqr, uint32_t fqid_off,
    e_QmFqrCounters counter)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetCounter(fqr, portal, fqid_off, counter);

	sched_unpin();

	return (val);
}
Example #3
File: function.c Project: ps4dev/ps4sdk
int ps4KernelFunctionHookDestroy(Ps4KernelFunctionHook *hook)
{
	Ps4KernelFunctionHookArgument *arg = (Ps4KernelFunctionHookArgument *)hook;
	uint16_t fnBits;

	Ps4ExceptionTryCatchFinally(ps4KernelFunctionHookExceptionHandler, &arg);

	if(ps4KernelFunctionHookMutex == NULL)
		ps4ExpressionReturnOnError(ps4KernelFunctionHookInitialize());

	ps4ExceptionLeaveIf(PS4_ERROR_ARGUMENT_PRIMARY_MISSING, hook == NULL);

	mtx_lock(ps4KernelFunctionHookMutex);

	sched_pin();
	ps4KernelFunctionLock(arg->function, &fnBits);
	ps4KernelMemoryCopy((uint8_t *)arg->bridge + 2, (uint8_t *)arg->function + 2, arg->bridgeCopiedSize - 2);
	ps4KernelMemoryCopy(arg->bridge, &fnBits, 2);
	ps4KernelFunctionUnlock(arg->function, fnBits);
	sched_unpin();

	pause("Waiting for entered hooks to complete the prologue", 100);
	ps4AtomicSpinLock64(&arg->lock);
	while(arg->entryCount > 0)
	{
		ps4AtomicSpinUnlock64(&arg->lock);
		pause("Waiting for entered hooks to complete the prologue", 100);
		ps4AtomicSpinLock64(&arg->lock);
	}
	ps4AtomicSpinUnlock64(&arg->lock);

	ps4ExceptionThrow(PS4_OK); // let catch clean up, return OK
}
Example #4
File: function.c Project: ps4dev/ps4sdk
int ps4KernelFunctionUnlock(void *function, uint16_t bits)
{
	if(function == NULL)
		return PS4_ERROR_ARGUMENT_PRIMARY_MISSING;

	sched_pin();
	ps4KernelProtectionWriteDisable();
	ps4AtomicSwap16(function, &bits);
	ps4KernelProtectionWriteEnable();
	sched_unpin();

	return PS4_OK;
}
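
Why the pin matters in this ps4sdk pair (see also Example #13 below): on x86 the write-protect flag is per-CPU state, so a thread that migrated between ps4KernelProtectionWriteDisable() and ps4KernelProtectionWriteEnable() would leave protection disabled on one CPU and "re-enable" it on another. A minimal sketch of the same bracket, assuming (the source does not confirm it) that those calls toggle CR0.WP; patch_byte() is a hypothetical helper:

/*
 * Sketch only: patch_byte() is hypothetical, and it assumes the
 * protection calls toggle the per-CPU CR0.WP bit -- hence the pin.
 */
static void
patch_byte(uint8_t *target, uint8_t value)
{
	sched_pin();				/* CR0 is per-CPU */
	ps4KernelProtectionWriteDisable();	/* allow kernel text writes */
	*target = value;
	ps4KernelProtectionWriteEnable();	/* restore protection */
	sched_unpin();
}
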
Example #5
void
openpic_ipi(device_t dev, u_int cpu)
{
	struct openpic_softc *sc;

	KASSERT(dev == root_pic, ("Cannot send IPIs from non-root OpenPIC"));

	sc = device_get_softc(dev);
	sched_pin();
	openpic_write(sc, OPENPIC_PCPU_IPI_DISPATCH(PCPU_GET(cpuid), 0),
	    1u << cpu);
	sched_unpin();
}
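
Every example in this collection uses the same idiom: sched_pin() forbids migration of the current thread, so per-CPU state (the CPU id from PCPU_GET(cpuid), a per-CPU portal handle, an unsynchronized per-CPU timer) stays valid until the matching sched_unpin(). A minimal sketch of the idiom, with a hypothetical per-CPU counter:

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/sched.h>

/* Hypothetical per-CPU counter, used only to illustrate the idiom. */
static uint64_t example_counter[MAXCPU];

static void
example_bump_counter(void)
{
	u_int cpu;

	sched_pin();			/* no migration from here... */
	cpu = PCPU_GET(cpuid);		/* cpuid stays valid */
	example_counter[cpu]++;
	sched_unpin();			/* ...to here */
}

Note that sched_pin() only prevents migration; unlike critical_enter(), the pinned thread can still be preempted by another thread on the same CPU.
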
Example #6
int
bman_put_buffer(t_Handle pool, void *buffer)
{
	struct bman_softc *sc;
	t_Handle portal;
	int error;

	sc = bman_sc;
	sched_pin();

	portal = bman_portal_setup(sc);
	if (portal == NULL) {
		sched_unpin();
		return (EIO);
	}

	error = BM_POOL_PutBuf(pool, portal, buffer);

	sched_unpin();

	return ((error == E_OK) ? 0 : EIO);
}
Example #7
void *
bman_get_buffer(t_Handle pool)
{
	struct bman_softc *sc;
	t_Handle portal;
	void *buffer;

	sc = bman_sc;
	sched_pin();

	portal = bman_portal_setup(sc);
	if (portal == NULL) {
		sched_unpin();
		return (NULL);
	}

	buffer = BM_POOL_GetBuf(pool, portal);

	sched_unpin();

	return (buffer);
}
Example #8
int
bman_pool_fill(t_Handle pool, uint16_t nbufs)
{
	struct bman_softc *sc;
	t_Handle portal;
	int error;

	sc = bman_sc;
	sched_pin();

	portal = bman_portal_setup(sc);
	if (portal == NULL) {
		sched_unpin();
		return (EIO);
	}

	error = BM_POOL_FillBufs(pool, portal, nbufs);

	sched_unpin();

	return ((error == E_OK) ? 0 : EIO);
}
Example #9
static __inline void
openpic_set_priority(struct openpic_softc *sc, int pri)
{
	u_int tpr;
	uint32_t x;

	sched_pin();
	tpr = OPENPIC_PCPU_TPR((sc->sc_dev == root_pic) ? PCPU_GET(cpuid) : 0);
	x = openpic_read(sc, tpr);
	x &= ~OPENPIC_TPR_MASK;
	x |= pri;
	openpic_write(sc, tpr, x);
	sched_unpin();
}
Example #10
File: qman.c Project: 2asoft/freebsd
t_Error
qman_fqr_pull_frame(t_Handle fqr, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_FQR_PullFrame(fqr, portal, fqid_off, frame);

	sched_unpin();

	return (error);
}
Example #11
File: qman.c Project: 2asoft/freebsd
uint32_t
qman_fqr_get_base_fqid(t_Handle fqr)
{
	struct qman_softc *sc;
	uint32_t val;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (0);
	}

	val = QM_FQR_GetFqid(fqr);

	sched_unpin();

	return (val);
}
Example #12
File: qman.c Project: 2asoft/freebsd
t_Error
qman_poll(e_QmPortalPollSource source)
{
	struct qman_softc *sc;
	t_Error error;
	t_Handle portal;

	sc = qman_sc;
	sched_pin();

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		sched_unpin();
		return (E_NOT_SUPPORTED);
	}

	error = QM_Poll(sc->sc_qh, source);

	sched_unpin();

	return (error);
}
Example #13
File: function.c Project: ps4dev/ps4sdk
int ps4KernelFunctionLock(void *function, uint16_t *bits)
{
	if(function == NULL)
		return PS4_ERROR_ARGUMENT_PRIMARY_MISSING;
	if(bits == NULL)
		return PS4_ERROR_ARGUMENT_OUT_MISSING;

	sched_pin();
	ps4KernelMemoryCopy((void *)ps4PayloadLock, (void *)bits, ps4PayloadLockSize);
	ps4KernelProtectionWriteDisable();
	ps4AtomicSwap16(function, bits);
	ps4KernelProtectionWriteEnable();
	sched_unpin();

	return PS4_OK;
}
Example #14
static void
delay_tsc(int n)
{
	uint64_t end, now;

	/*
	 * Pin the current thread to ensure correct behavior if the TSCs
	 * on different CPUs are not in sync.
	 */
	sched_pin();
	now = rdtsc();
	end = now + tsc_freq * n / 1000000;
	do {
		cpu_spinwait();
		now = rdtsc();
	} while (now < end);
	sched_unpin();
}
Example #15
void
delay_tick(int usec)
{
	u_long end;

	if (usec < 0)
		return;

	/*
	 * We avoid being migrated to another CPU with a possibly
	 * unsynchronized TICK timer while spinning.
	 */
	sched_pin();

	end = rd(tick) + (u_long)usec * PCPU_GET(clock) / 1000000;
	while (rd(tick) < end)
		cpu_spinwait();

	sched_unpin();
}
Example #16
void
dpaa_portal_map_registers(struct dpaa_portals_softc *sc)
{
	unsigned int cpu;

	sched_pin();
	cpu = PCPU_GET(cpuid);
	if (sc->sc_dp[cpu].dp_regs_mapped)
		goto out;

	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
	    sc->sc_dp[cpu].dp_ce_pa, sc->sc_dp[cpu].dp_ce_size,
	    _TLB_ENTRY_MEM);
	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
	    sc->sc_dp[cpu].dp_ci_pa, sc->sc_dp[cpu].dp_ci_size,
	    _TLB_ENTRY_IO);

	sc->sc_dp[cpu].dp_regs_mapped = 1;

out:
	sched_unpin();
}
Example #17
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte;
	unsigned last_pte, i;
	vm_paddr_t page_addr;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = VM_PAGE_TO_PHYS(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
Example #18
static int
delay_tc(int n)
{
	struct timecounter *tc;
	timecounter_get_t *func;
	uint64_t end, freq, now;
	u_int last, mask, u;

	tc = timecounter;
	freq = atomic_load_acq_64(&tsc_freq);
	if (tsc_is_invariant && freq != 0) {
		func = get_tsc;
		mask = ~0u;
	} else {
		if (tc->tc_quality <= 0)
			return (0);
		func = tc->tc_get_timecount;
		mask = tc->tc_counter_mask;
		freq = tc->tc_frequency;
	}
	now = 0;
	end = freq * n / 1000000;
	if (func == get_tsc)
		sched_pin();
	last = func(tc) & mask;
	do {
		cpu_spinwait();
		u = func(tc) & mask;
		if (u < last)
			now += mask - last + u + 1;
		else
			now += u - last;
		last = u;
	} while (now < end);
	if (func == get_tsc)
		sched_unpin();
	return (1);
}
Example #19
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}
Example #20
/* PPGTT support for Sandybridge/Gen6 and later */
static void
i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
    unsigned first_entry, unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte, last_pte, i;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}

}
Example #21
/*
 * Implement uiomove(9) from physical memory using sf_bufs to reduce
 * the creation and destruction of ephemeral mappings.
 */
int
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));
	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		page_offset = offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - page_offset);
		sched_pin();
		sf = sf_buf_alloc(ma[offset >> PAGE_SHIFT], SFB_CPUPRIVATE);
		cp = (char *)sf_buf_kva(sf) + page_offset;
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				sf_buf_free(sf);
				sched_unpin();
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		sf_buf_free(sf);
		sched_unpin();
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
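
Examples #17, #20, #21, and #24 pair the pin with sf_buf_alloc(..., SFB_CPUPRIVATE): a CPU-private ephemeral mapping is only valid on the CPU that created it, so the thread must stay put until sf_buf_free(). A minimal sketch of that pattern, with a hypothetical zero_page() helper:

/* Hypothetical helper: zero one page through a CPU-private mapping. */
static void
zero_page(vm_page_t m)
{
	struct sf_buf *sf;

	sched_pin();				/* mapping is per-CPU */
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	bzero((void *)sf_buf_kva(sf), PAGE_SIZE);
	sf_buf_free(sf);
	sched_unpin();
}
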
Example #22
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
    struct dpaa_portals_softc *sc;
    t_QmPortalParam qpp;
    unsigned int cpu, p;
    t_Handle portal;

    /* Return NULL if we are not ready or are detaching */
    if (qp_sc == NULL)
        return (NULL);

    sc = qp_sc;

    sched_pin();
    portal = NULL;
    cpu = PCPU_GET(cpuid);

    /* Check if portal is ready */
    while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                                0, -1) == 0) {
        p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

        /* Return if portal is already initialized */
        if (p != 0 && p != -1) {
            sched_unpin();
            return ((t_Handle)p);
        }

        /* Not initialized and "owned" by another thread */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /* Map portal registers */
    dpaa_portal_map_registers(sc);

    /* Configure and initialize portal */
    qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
    qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
    qpp.h_Qm = qsc->sc_qh;
    qpp.swPortalId = cpu;
    qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
    qpp.fdLiodnOffset = 0;
    qpp.f_DfltFrame = qman_received_frame_callback;
    qpp.f_RejectedFrame = qman_rejected_frame_callback;
    qpp.h_App = qsc;

    portal = QM_PORTAL_Config(&qpp);
    if (portal == NULL)
        goto err;

    if (QM_PORTAL_Init(portal) != E_OK)
        goto err;

    if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
        goto err;

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                        (uint32_t)portal);
    sched_unpin();

    return (portal);

err:
    if (portal != NULL)
        QM_PORTAL_Free(portal);

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
    sched_unpin();

    return (NULL);
}
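
The lookup above (and its twin in Example #28) is a small lock-free state machine: the per-CPU slot dp_ph holds 0 when free, -1 while some thread initializes the portal, and the portal handle once published. A loser of the atomic_cmpset race yields until the winner publishes. A stripped-down sketch of the claim-or-wait loop over a hypothetical slot variable:

/* 0 = free, (uint32_t)-1 = claimed for init, else = published handle. */
static uint32_t slot;

static uint32_t
claim_or_wait(void)
{
	uint32_t p;

	while (atomic_cmpset_acq_32(&slot, 0, -1) == 0) {
		p = atomic_load_acq_32(&slot);
		if (p != 0 && p != (uint32_t)-1)
			return (p);		/* already published */
		/* Another thread owns the slot; yield and retry. */
		thread_lock(curthread);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);
	}
	return (0);	/* we own the slot: initialize, then publish */
}

On success the owner stores the real handle with atomic_store_rel_32(); on failure it stores 0 back so a later caller can retry, exactly as the err path above does.
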
Example #23
/*
 * Wait "n" microseconds.
 * Relies on timer 1 counting down from (i8254_freq / hz)
 * Note: timer had better have been programmed before this is first used!
 */
void
DELAY(int n)
{
	int delta, prev_tick, tick, ticks_left;

#ifdef DELAYDEBUG
	int getit_calls = 1;
	int n1;
	static int state = 0;
#endif

	if (tsc_freq != 0 && !tsc_is_broken) {
		uint64_t start, end, now;

		sched_pin();
		start = rdtsc();
		end = start + (tsc_freq * n) / 1000000;
		do {
			cpu_spinwait();
			now = rdtsc();
		} while (now < end || (now > start && end < start));
		sched_unpin();
		return;
	}
#ifdef DELAYDEBUG
	if (state == 0) {
		state = 1;
		for (n1 = 1; n1 <= 10000000; n1 *= 10)
			DELAY(n1);
		state = 2;
	}
	if (state == 1)
		printf("DELAY(%d)...", n);
#endif
	/*
	 * Read the counter first, so that the rest of the setup overhead is
	 * counted.  Guess the initial overhead is 20 usec (on most systems it
	 * takes about 1.5 usec for each of the i/o's in getit().  The loop
	 * takes about 6 usec on a 486/33 and 13 usec on a 386/20.  The
	 * multiplications and divisions to scale the count take a while).
	 *
	 * However, if ddb is active then use a fake counter since reading
	 * the i8254 counter involves acquiring a lock.  ddb must not do
	 * locking for many reasons, but it calls here for at least atkbd
	 * input.
	 */
#ifdef KDB
	if (kdb_active)
		prev_tick = 1;
	else
#endif
		prev_tick = getit();
	n -= 0;			/* XXX actually guess no initial overhead */
	/*
	 * Calculate (n * (i8254_freq / 1e6)) without using floating point
	 * and without any avoidable overflows.
	 */
	if (n <= 0)
		ticks_left = 0;
	else if (n < 256)
		/*
		 * Use fixed point to avoid a slow division by 1000000.
		 * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest.
		 * 2^15 is the first power of 2 that gives exact results
		 * for n between 0 and 256.
		 */
		ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15;
	else
Example #24
static int
tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	struct sf_buf	*sf;
	vm_pindex_t	idx;
	vm_page_t	m;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	char		*ma;
	int		error;

	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
		goto nocache;

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(m, offset, tlen)) {
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&m, offset, tlen, uio);
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
		KASSERT(offset == 0,
		    ("unexpected offset in tmpfs_mappedread for sendfile"));
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		ma = (char *)sf_buf_kva(sf);
		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
		if (error == 0) {
			if (tlen != PAGE_SIZE)
				bzero(ma + tlen, PAGE_SIZE - tlen);
			uio->uio_offset += tlen;
			uio->uio_resid -= tlen;
		}
		sf_buf_free(sf);
		sched_unpin();
		VM_OBJECT_LOCK(vobj);
		if (error == 0)
			m->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	}
	VM_OBJECT_UNLOCK(vobj);
nocache:
	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);

	return	(error);
}
Example #25
File: qman.c Project: 2asoft/freebsd
t_Handle
qman_fqr_create(uint32_t fqids_num, e_QmFQChannel channel, uint8_t wq,
    bool force_fqid, uint32_t fqid_or_align, bool init_parked,
    bool hold_active, bool prefer_in_cache, bool congst_avoid_ena,
    t_Handle congst_group, int8_t overhead_accounting_len,
    uint32_t tail_drop_threshold)
{
	struct qman_softc *sc;
	t_QmFqrParams fqr;
	unsigned int cpu;
	t_Handle fqrh, portal;

	sc = qman_sc;

	sched_pin();
	cpu = PCPU_GET(cpuid);

	/* Ensure the QMan portal is initialized */
	portal = qman_portal_setup(sc);
	if (portal == NULL) {
		device_printf(sc->sc_dev, "could not setup QMan portal\n");
		goto err;
	}

	fqr.h_Qm = sc->sc_qh;
	fqr.h_QmPortal = portal;
	fqr.initParked = init_parked;
	fqr.holdActive = hold_active;
	fqr.preferInCache = prefer_in_cache;

	/* We do not support stashing */
	fqr.useContextAForStash = FALSE;
	fqr.p_ContextA = 0;
	fqr.p_ContextB = 0;

	fqr.channel = channel;
	fqr.wq = wq;
	fqr.shadowMode = FALSE;
	fqr.numOfFqids = fqids_num;

	/* FQID */
	fqr.useForce = force_fqid;
	if (force_fqid) {
		fqr.qs.frcQ.fqid = fqid_or_align;
	} else {
		fqr.qs.nonFrcQs.align = fqid_or_align;
	}

	/* Congestion Avoidance */
	fqr.congestionAvoidanceEnable = congst_avoid_ena;
	if (congst_avoid_ena) {
		fqr.congestionAvoidanceParams.h_QmCg = congst_group;
		fqr.congestionAvoidanceParams.overheadAccountingLength =
		    overhead_accounting_len;
		fqr.congestionAvoidanceParams.fqTailDropThreshold =
		    tail_drop_threshold;
	} else {
		fqr.congestionAvoidanceParams.h_QmCg = 0;
		fqr.congestionAvoidanceParams.overheadAccountingLength = 0;
		fqr.congestionAvoidanceParams.fqTailDropThreshold = 0;
	}

	fqrh = QM_FQR_Create(&fqr);
	if (fqrh == NULL) {
		device_printf(sc->sc_dev, "could not create Frame Queue Range"
		    "\n");
		goto err;
	}

	sc->sc_fqr_cpu[QM_FQR_GetFqid(fqrh)] = PCPU_GET(cpuid);

	sched_unpin();

	return (fqrh);

err:
	sched_unpin();

	return (NULL);
}
Example #26
File: qman.c Project: 2asoft/freebsd
int
qman_attach(device_t dev)
{
	struct qman_softc *sc;
	t_QmParam qp;
	t_Error error;
	t_QmRevisionInfo rev;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	qman_sc = sc;

	if (XX_MallocSmartInit() != E_OK) {
		device_printf(dev, "could not initialize smart allocator.\n");
		return (ENXIO);
	}

	sched_pin();

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, 0, ~0, QMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate memory.\n");
		goto err;
	}

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL) {
		device_printf(dev, "could not allocate error interrupt.\n");
		goto err;
	}

	if (qp_sc == NULL)
		goto err;

	dpaa_portal_map_registers(qp_sc);

	/* Initialize QMan */
	qp.guestId = NCSW_MASTER_ID;
	qp.baseAddress = rman_get_bushandle(sc->sc_rres);
	qp.swPortalsBaseAddress = rman_get_bushandle(qp_sc->sc_rres[0]);
	qp.liodn = 0;
	qp.totalNumOfFqids = QMAN_MAX_FQIDS;
	qp.fqdMemPartitionId = NCSW_MASTER_ID;
	qp.pfdrMemPartitionId = NCSW_MASTER_ID;
	qp.f_Exception = qman_exception;
	qp.h_App = sc;
	qp.errIrq = (int)sc->sc_ires;
	qp.partFqidBase = QMAN_FQID_BASE;
	qp.partNumOfFqids = QMAN_MAX_FQIDS;
	qp.partCgsBase = 0;
	qp.partNumOfCgs = 0;

	sc->sc_qh = QM_Config(&qp);
	if (sc->sc_qh == NULL) {
		device_printf(dev, "could not be configured\n");
		goto err;
	}

	error = QM_Init(sc->sc_qh);
	if (error != E_OK) {
		device_printf(dev, "could not be initialized\n");
		goto err;
	}

	error = QM_GetRevision(sc->sc_qh, &rev);
	if (error != E_OK) {
		device_printf(dev, "could not get QMan revision\n");
		goto err;
	}

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	sched_unpin();

	qman_portal_setup(sc);

	return (0);

err:
	sched_unpin();
	qman_detach(dev);
	return (ENXIO);
}
Example #27
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
Example #28
t_Handle
bman_portal_setup(struct bman_softc *bsc)
{
    struct dpaa_portals_softc *sc;
    t_BmPortalParam bpp;
    t_Handle portal;
    unsigned int cpu, p;

    /* Return NULL if we are not ready or are detaching */
    if (bp_sc == NULL)
        return (NULL);

    sc = bp_sc;

    sched_pin();
    portal = NULL;
    cpu = PCPU_GET(cpuid);

    /* Check if portal is ready */
    while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                                0, -1) == 0) {
        p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

        /* Return if portal is already initialized */
        if (p != 0 && p != -1) {
            sched_unpin();
            return ((t_Handle)p);
        }

        /* Not initialized and "owned" by another thread */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /* Map portal registers */
    dpaa_portal_map_registers(sc);

    /* Configure and initialize portal */
    bpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
    bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
    bpp.h_Bm = bsc->sc_bh;
    bpp.swPortalId = cpu;
    bpp.irq = (int)sc->sc_dp[cpu].dp_ires;

    portal = BM_PORTAL_Config(&bpp);
    if (portal == NULL)
        goto err;

    if (BM_PORTAL_Init(portal) != E_OK)
        goto err;

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                        (uint32_t)portal);

    sched_unpin();

    return (portal);

err:
    if (portal != NULL)
        BM_PORTAL_Free(portal);

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
    sched_unpin();

    return (NULL);
}
Example #29
t_Handle
bman_pool_create(uint8_t *bpid, uint16_t bufferSize, uint16_t maxBuffers,
    uint16_t minBuffers, uint16_t allocBuffers, t_GetBufFunction *f_GetBuf,
    t_PutBufFunction *f_PutBuf, uint32_t dep_sw_entry, uint32_t dep_sw_exit,
    uint32_t dep_hw_entry, uint32_t dep_hw_exit,
    t_BmDepletionCallback *f_Depletion, t_Handle h_BufferPool,
    t_PhysToVirt *f_PhysToVirt, t_VirtToPhys *f_VirtToPhys)
{
	uint32_t thresholds[MAX_DEPLETION_THRESHOLDS];
	struct bman_softc *sc;
	t_Handle pool, portal;
	t_BmPoolParam bpp;
	int error;

	sc = bman_sc;
	pool = NULL;

	sched_pin();

	portal = bman_portal_setup(sc);
	if (portal == NULL)
		goto err;

	memset(&bpp, 0, sizeof(bpp));
	bpp.h_Bm = sc->sc_bh;
	bpp.h_BmPortal = portal;
	bpp.h_App = h_BufferPool;
	bpp.numOfBuffers = allocBuffers;

	bpp.bufferPoolInfo.h_BufferPool = h_BufferPool;
	bpp.bufferPoolInfo.f_GetBuf = f_GetBuf;
	bpp.bufferPoolInfo.f_PutBuf = f_PutBuf;
	bpp.bufferPoolInfo.f_PhysToVirt = f_PhysToVirt;
	bpp.bufferPoolInfo.f_VirtToPhys = f_VirtToPhys;
	bpp.bufferPoolInfo.bufferSize = bufferSize;

	pool = BM_POOL_Config(&bpp);
	if (pool == NULL)
		goto err;

	/*
	 * Buffer context must be disabled on FreeBSD
	 * as it could cause memory corruption.
	 */
	BM_POOL_ConfigBuffContextMode(pool, 0);

	if (minBuffers != 0 || maxBuffers != 0) {
		error = BM_POOL_ConfigStockpile(pool, maxBuffers, minBuffers);
		if (error != E_OK)
			goto err;
	}

	if (f_Depletion != NULL) {
		thresholds[BM_POOL_DEP_THRESH_SW_ENTRY] = dep_sw_entry;
		thresholds[BM_POOL_DEP_THRESH_SW_EXIT] = dep_sw_exit;
		thresholds[BM_POOL_DEP_THRESH_HW_ENTRY] = dep_hw_entry;
		thresholds[BM_POOL_DEP_THRESH_HW_EXIT] = dep_hw_exit;
		error = BM_POOL_ConfigDepletion(pool, f_Depletion, thresholds);
		if (error != E_OK)
			goto err;
	}

	error = BM_POOL_Init(pool);
	if (error != E_OK)
		goto err;

	*bpid = BM_POOL_GetId(pool);
	sc->sc_bpool_cpu[*bpid] = PCPU_GET(cpuid);

	sched_unpin();

	return (pool);

err:
	if (pool != NULL)
		BM_POOL_Free(pool);

	sched_unpin();

	return (NULL);
}