Example #1
void cxio_dump_tpt(struct cxio_rdev *rdev, uint32_t stag)
{
	struct ch_mem_range *m;
	u64 *data;
	int rc;
	int size = 32;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
		return;
	}
	m->mem_id = MEM_PMRX;
	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TPT addr 0x%x len %d", __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (u64 *)m->buf;
	while (size > 0) {
		CTR2(KTR_IW_CXGB, "TPT %08x: %016llx", m->addr, (unsigned long long) *data);
		size -= 8;
		data++;
		m->addr += 8;
	}
	free(m, M_DEVBUF);
}
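
All of the examples in this listing share the same tracing idiom: a CTRn() macro records an event with up to n printf-style arguments into the kernel's KTR ring buffer, filtered at run time by a class mask (KTR_IW_CXGB, KTR_PMAP, KTR_LOCK, and so on). The snippet below is a minimal sketch of that idiom rather than code from any driver shown here; foo_attach_trace() and its arguments are hypothetical, KTR_GEN is the generic trace class, and a kernel built with "options KTR" is assumed. Recorded entries can later be read with ktrdump(8) or ddb's "show ktr".

#include <sys/param.h>
#include <sys/ktr.h>

/* Hypothetical helper: logs two events into the KTR trace buffer. */
static void
foo_attach_trace(void *softc, int unit)
{
	/* CTR2: class mask, format string, and exactly two arguments. */
	CTR2(KTR_GEN, "%s: softc %p", __func__, softc);
	CTR1(KTR_GEN, "foo%d attached", unit);
}
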
Example #2
/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up
	 * shared waiters, if we have any, over writers.  This is probably
	 * not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}
Example #3
/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by giving
	 * them precedence and clearing the shared waiters bit anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}
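
The comment above mentions the "easy case" that lives in sx.h: the unlock fast path tries a single atomic compare-and-swap and only falls into _sx_xunlock_hard() when that fails (i.e. when waiter bits are set or the lock is recursed). A hedged sketch of that fast path, written as an inline for illustration (the exact macro in sys/sx.h differs in form; atomic_cmpset_rel_ptr() and SX_LOCK_UNLOCKED are real, the function name here is not):

static __inline void
example_sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
{
	uintptr_t tid = (uintptr_t)td;

	/* Fast path: release an uncontested, unrecursed exclusive lock. */
	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		_sx_xunlock_hard(sx, tid, file, line);	/* the hard case above */
}
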
Example #4
static void
stop_ep_timer(struct iwch_ep *ep)
{
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	callout_drain(&ep->timer);
	put_ep(&ep->com);
}
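
stop_ep_timer() drains the endpoint's timeout callout and then drops a reference on the endpoint with put_ep(). A hedged sketch of the matching arm side is shown below so the put_ep() pairing is visible; only callout_init()/callout_reset() are standard API here, while start_ep_timer(), get_ep(), ep_timeout() and ep_timeout_secs are assumed names, not quoted from the driver:

static void
start_ep_timer(struct iwch_ep *ep)
{
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	get_ep(&ep->com);	/* reference released later by stop_ep_timer() */
	callout_init(&ep->timer, 1);	/* MPSAFE callout */
	callout_reset(&ep->timer, ep_timeout_secs * hz, ep_timeout, ep);
}
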
Example #5
static void
ntb_rx_completion_task(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct mbuf *m;
	struct ntb_queue_entry *entry;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) {
		m = entry->buf;
		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
			qp->rx_handler(qp, qp->cb_data, m, entry->len);

		entry->buf = NULL;
		entry->len = qp->transport->bufsize;

		CTR1(KTR_NTB, "RX: entry %p removed from rx_free_q "
		    "and added to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		if (qp->rx_err_no_buf > qp->last_rx_no_buf) {
			qp->last_rx_no_buf = qp->rx_err_no_buf;
			CTR0(KTR_NTB, "RX: could spawn rx task");
			callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full,
			    qp);
		}
	}
}
Example #6
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}
Example #7
void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
Example #8
void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}
Example #9
void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}
Example #10
void
pmap_kremove(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_KREMOVE(mmu_obj, va);
}
Example #11
struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
	return (MMU_SCAN_MD(mmu_obj, prev));
}
Example #12
/*
 * When called the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *   0: SMP not started yet, nothing was done
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
static int
generic_stop_cpus(cpumask_t map, u_int type)
{
	int i;

	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return 0;

	CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}
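
generic_stop_cpus() is normally reached through thin public wrappers that supply the IPI type. The sketch below shows that calling pattern under the cpumask_t-era API used above; the wrapper bodies are assumed rather than quoted:

int
stop_cpus(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}
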
Example #13
/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}
Example #14
void cxio_dump_tcb(struct cxio_rdev *rdev, uint32_t hwtid)
{
	struct ch_mem_range *m;
	int size = TCB_SIZE;
	uint32_t *data;
	int rc;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
		return;
	}
	m->mem_id = MEM_CM;
	m->addr = hwtid * size;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TCB %d len %d", __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (uint32_t *)m->buf;
	while (size > 0) {
		printf("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			m->addr,
			*(data+2), *(data+3), *(data),*(data+1),
			*(data+6), *(data+7), *(data+4), *(data+5));
		size -= 32;
		data += 8;
		m->addr += 32;
	}
	free(m, M_DEVBUF);
}
Example #15
int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
Example #16
int
pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
{

	CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
	return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
}
Example #17
void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}
Example #18
static ACPI_STATUS
EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 Data)
{
    ACPI_STATUS	status;
    u_int gen_count;

    ACPI_SERIAL_ASSERT(ec);
    CTR2(KTR_ACPI, "ec write to %#x, data %#x", Address, Data);

    status = EcCommand(sc, EC_COMMAND_WRITE);
    if (ACPI_FAILURE(status))
	return (status);

    gen_count = sc->ec_gencount;
    EC_SET_DATA(sc, Address);
    status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->ec_dev, "EcWrite: failed waiting for sent address\n");
	return (status);
    }

    gen_count = sc->ec_gencount;
    EC_SET_DATA(sc, Data);
    status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->ec_dev, "EcWrite: failed waiting for sent data\n");
	return (status);
    }

    return (AE_OK);
}
Example #19
void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}
Example #20
void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}
Example #21
void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}
Example #22
void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}
Example #23
void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}
Example #24
boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}
Example #25
/*
 * Allocate n contiguous page pods.  Returns 0 on success and stores the
 * starting page pod tag in *ptag, or an errno on failure.
 */
int
t3_alloc_ppods(struct tom_data *td, unsigned int n, int *ptag)
{
	unsigned int i, j;

	if (__predict_false(!td->ppod_map)) {
		printf("ppod_map not set\n");
		return (EINVAL);
	}

	mtx_lock(&td->ppod_map_lock);
	for (i = 0; i < td->nppods; ) {
		
		for (j = 0; j < n; ++j)           /* scan ppod_map[i..i+n-1] */
			if (td->ppod_map[i + j]) {
				i = i + j + 1;
				goto next;
			}
		memset(&td->ppod_map[i], 1, n);   /* allocate range */
		mtx_unlock(&td->ppod_map_lock);
		CTR2(KTR_TOM,
		    "t3_alloc_ppods: n=%u tag=%u", n, i);
		*ptag = i;
		return (0);
	next: ;
	}
	mtx_unlock(&td->ppod_map_lock);
	return (ENOMEM);	/* no contiguous run of n free page pods */
}
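
A caller-side sketch consistent with the corrected contract above: the wrapper below is hypothetical (only t3_alloc_ppods() and KTR_TOM come from the example), and simply reserves npods page pods and reports the tag through *tagp.

static int
example_reserve_ppods(struct tom_data *td, unsigned int npods, int *tagp)
{
	int rc;

	rc = t3_alloc_ppods(td, npods, tagp);
	if (rc != 0)
		return (rc);	/* EINVAL, or no contiguous free run */
	CTR2(KTR_TOM, "reserved %u page pods, tag %d", npods, *tagp);
	return (0);
}
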
Example #26
boolean_t
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}
Example #27
int
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
{
	ucontext32_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext32(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

#if 0
	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	     td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
#endif

	return (EJUSTRETURN);
}
Example #28
void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}
Example #29
/*
 * Called after the last CPL for the toepcb has been received.
 *
 * The inp must be wlocked on entry and is unlocked (or maybe destroyed) by the
 * time this function exits.
 */
static int
toepcb_release(struct toepcb *toep)
{
	struct inpcb *inp = toep->tp_inp;
	struct toedev *tod = toep->tp_tod;
	struct tom_data *td = t3_tomdata(tod);
	int rc;

	INP_WLOCK_ASSERT(inp);
	KASSERT(!(toep->tp_flags & TP_CPL_DONE),
	    ("%s: double release?", __func__));

	CTR2(KTR_CXGB, "%s: tid %d", __func__, toep->tp_tid);

	toep->tp_flags |= TP_CPL_DONE;
	toep->tp_inp = NULL;

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	if (!(toep->tp_flags & TP_ATTACHED))
		t3_release_offload_resources(toep);

	rc = in_pcbrele_wlocked(inp);
	if (!rc)
		INP_WUNLOCK(inp);
	return (rc);
}
Example #30
void
pmap_clear_reference(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_REFERENCE(mmu_obj, m);
}