Code example #1
int
virtqueue_postpone_intr(struct virtqueue *vq)
{
	uint16_t ndesc;

	/*
	 * Postpone until at least half of the available descriptors
	 * have been consumed.
	 *
	 * XXX Adaptive factor? (Linux uses 3/4)
	 */
	ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	cpu_mfence();

	/*
	 * Enough items may have already been consumed to meet our
	 * threshold since we last checked. Let our caller know so
	 * it processes the new entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}
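
The ndesc calculation above works because virtio ring indices are free-running 16-bit counters: the unsigned subtraction gives the number of outstanding descriptors even after the indices wrap. A small standalone sketch of just that arithmetic (the function name and test values below are illustrative, not from DragonFly):

/*
 * Standalone sketch of the threshold math above (hypothetical helper,
 * not DragonFly code).  Virtio ring indices are free-running uint16_t
 * counters, so the unsigned subtraction stays correct across wraparound.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t
outstanding_halfway(uint16_t avail_idx, uint16_t used_cons_idx)
{
	/* Descriptors made available but not yet consumed by the host. */
	uint16_t outstanding = (uint16_t)(avail_idx - used_cons_idx);

	/* Ask for an interrupt only once half of them are done. */
	return (uint16_t)(outstanding / 2);
}

int
main(void)
{
	/* 10 descriptors outstanding, no wraparound: threshold is 5. */
	assert(outstanding_halfway(110, 100) == 5);

	/* avail_idx has wrapped past 0; the arithmetic still yields 5. */
	assert(outstanding_halfway(4, 65530) == 5);
	return 0;
}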
Code example #2
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	cpu_mfence();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
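
In virtqueue_dequeue() the cpu_mfence() sits between the used-index comparison and the read of the used ring element, so the element's contents are never read before the index that published them. A rough portable equivalent of that consumer-side ordering, written with C11 atomics rather than the DragonFly primitives (the ring layout and names below are illustrative):

/*
 * Generic consumer-side sketch (C11 atomics, not the DragonFly API) of
 * the ordering virtqueue_dequeue() needs: observe the producer's index
 * first, then read the slot it published.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 256u			/* power of two, like vq_nentries */

struct ring {
	_Atomic uint16_t prod_idx;	/* written by the producer */
	uint16_t	 cons_idx;	/* private to the consumer */
	uint32_t	 slots[RING_SIZE];
};

/* Returns true and fills *out if an entry was available. */
static bool
ring_dequeue(struct ring *r, uint32_t *out)
{
	/*
	 * Acquire load: anything published before prod_idx was bumped
	 * (i.e. the slot contents) is visible after this load.
	 */
	uint16_t prod = atomic_load_explicit(&r->prod_idx,
	    memory_order_acquire);

	if (r->cons_idx == prod)
		return false;		/* ring is empty */

	*out = r->slots[r->cons_idx & (RING_SIZE - 1)];
	r->cons_idx++;
	return true;
}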
Code example #3
/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int
virtqueue_enable_intr(struct virtqueue *vq)
{
	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 */
	vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;
	} else {
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	}

	cpu_mfence();

	/*
	 * Additional items may have been consumed between the time we
	 * last checked and the time we enabled interrupts above. Let our
	 * caller know so it processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}
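
virtqueue_enable_intr() follows the classic "enable, then re-check" pattern: it re-arms notifications, issues a full barrier, and only then samples the used index, so a completion that raced the re-arm is still noticed. A sketch of that pattern with C11 atomics instead of cpu_mfence() (the structure and names are illustrative, and the device side is assumed to use matching ordering):

/*
 * Sketch of the "enable, then re-check" pattern above, using C11
 * atomics rather than cpu_mfence().  The seq_cst fence provides the
 * store->load ordering the re-check depends on; with only a release
 * store the final load could be satisfied before the flag store became
 * visible, and a completion could be missed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct queue {
	_Atomic bool	 intr_enabled;	/* read by the device side */
	_Atomic uint16_t used_idx;	/* written by the device side */
	uint16_t	 used_cons_idx;	/* private to the driver side */
};

/* Returns true if entries arrived while interrupts were still off. */
static bool
queue_enable_intr(struct queue *q)
{
	atomic_store_explicit(&q->intr_enabled, true,
	    memory_order_relaxed);

	/*
	 * Full barrier: the flag store must be visible before we sample
	 * used_idx, otherwise the re-check can miss work that arrives
	 * in the window.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	return atomic_load_explicit(&q->used_idx,
	    memory_order_relaxed) != q->used_cons_idx;
}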
Code example #4
File: drm_cache.c Project: iHaD/DragonFlyBSD
void
drm_clflush_virt_range(void *in_addr, unsigned long length)
{
	char *addr = in_addr;
	if (cpu_has_clflush) {
		char *end = addr + length;
		cpu_mfence();
		for (; addr < end; addr += cpu_clflush_line_size)
			clflush((unsigned long)addr);
		clflush((unsigned long)(end - 1));
		cpu_mfence();
		return;
	}

	cpu_wbinvd_on_all_cpus();
}
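
The two cpu_mfence() calls bracket the flush loop: the first orders earlier stores before the cache-line flushes, the second keeps the flushes ahead of whatever follows (typically handing the buffer to a device). A user-space analogue using SSE2 intrinsics rather than the kernel's clflush()/cpu_mfence() helpers, assuming x86 with CLFLUSH and a 64-byte line, and omitting the cpu_has_clflush/wbinvd fallback:

/*
 * User-space sketch of the bracketing above, using SSE2 intrinsics
 * instead of the kernel helpers.  CACHE_LINE_SIZE is assumed; the
 * kernel reads the real value (cpu_clflush_line_size) from CPUID.
 */
#include <emmintrin.h>		/* _mm_clflush(), _mm_mfence() */

#define CACHE_LINE_SIZE 64u

static void
flush_range(void *in_addr, unsigned long length)
{
	char *addr = in_addr;
	char *end = addr + length;

	/* Make sure earlier stores reach the cache before we flush it. */
	_mm_mfence();

	for (; addr < end; addr += CACHE_LINE_SIZE)
		_mm_clflush(addr);
	_mm_clflush(end - 1);	/* cover a partially filled final line */

	/* Make sure the flushes complete before anything that follows. */
	_mm_mfence();
}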
Code example #5
File: virtqueue.c Project: Gwenio/DragonFlyBSD
void
virtqueue_notify(struct virtqueue *vq, lwkt_serialize_t interlock)
{
	/* Ensure updated avail->idx is visible to host. */
	cpu_mfence();

	if (vq_ring_must_notify_host(vq)) {
		lwkt_serialize_exit(interlock);
		vq_ring_notify_host(vq);
		lwkt_serialize_enter(interlock);
	}
	vq->vq_queued_cnt = 0;
}
Code example #6
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	cpu_mfence();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify() for debugging. */
	vq->vq_queued_cnt++;
}
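
This is the producer half of the ordering shown in the dequeue sketch after code example #2: fill the ring slot first, then make the index increment visible. In portable C11 terms the cpu_mfence() before the index update acts as (at least) a release barrier; an illustrative sketch:

/*
 * Producer-side counterpart to the earlier consumer sketch (C11
 * atomics, illustrative names): publish the slot, then the index.
 */
#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256u

struct ring {
	_Atomic uint16_t prod_idx;	/* read by the consumer */
	uint32_t	 slots[RING_SIZE];
};

static void
ring_enqueue(struct ring *r, uint32_t value)
{
	/* Only the producer writes prod_idx, so a relaxed load is fine. */
	uint16_t prod = atomic_load_explicit(&r->prod_idx,
	    memory_order_relaxed);

	/* Fill the slot before anyone can see the new index. */
	r->slots[prod & (RING_SIZE - 1)] = value;

	/*
	 * Release store: the slot write above is ordered before the
	 * index update that publishes it.
	 */
	atomic_store_explicit(&r->prod_idx, (uint16_t)(prod + 1),
	    memory_order_release);
}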
Code example #7
File: acpi_cpu_cstate.c Project: wan721/DragonFlyBSD
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct	acpi_cst_softc *sc;
    struct	acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int		cx_next_idx, i, tdiff, bm_arb_disabled = 0;

    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before entering the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}
Code example #8
File: kern_mutex.c Project: kusumi/DragonFlyBSD
/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	indefinite_info_t info;
	int error;

	indefinite_init(&info, mtx->mtx_ident, 1,
			((link->state & MTX_LINK_LINKED_SH) ? 'm' : 'M'));

	/*
	 * Sleep.  Handle false wakeups, interruptions, etc.
	 * The link may also have been aborted.  The LINKED
	 * bit was set by this cpu so we can test it without
	 * fences.
	 */
	error = 0;
	while (link->state & MTX_LINK_LINKED) {
		tsleep_interlock(link, 0);
		cpu_lfence();
		if (link->state & MTX_LINK_LINKED) {
			error = tsleep(link, flags | PINTERLOCKED,
				       mtx->mtx_ident, to);
			if (error)
				break;
		}
		if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
			indefinite_check(&info);
	}

	/*
	 * We need at least an lfence (load fence) to ensure our cpu does not
	 * reorder loads (of data outside the lock structure) prior to the
	 * remote cpu's release, since the above test may have run without
	 * any atomic interactions.
	 *
	 * If we do not do this then state updated by the other cpu before
	 * releasing its lock may not be read cleanly by our cpu when this
	 * function returns.  Even though the other cpu ordered its stores,
	 * our loads can still be out of order.
	 */
	cpu_mfence();

	/*
	 * We are done, make sure the link structure is unlinked.
	 * It may still be on the list due to e.g. EINTR or
	 * EWOULDBLOCK.
	 *
	 * It is possible for the tsleep to race an ABORT and cause
	 * error to be 0.
	 *
	 * The tsleep() can be woken up for numerous reasons and error
	 * might be zero in situations where we intend to return an error.
	 *
	 * (This is the synchronous case so state cannot be CALLEDBACK)
	 */
	switch(link->state) {
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		error = 0;
		break;
	case MTX_LINK_ABORTED:
		error = ENOLCK;
		break;
	case MTX_LINK_LINKED_EX:
	case MTX_LINK_LINKED_SH:
		mtx_delete_link(mtx, link);
		/* fall through */
	default:
		if (error == 0)
			error = EWOULDBLOCK;
		break;
	}

	/*
	 * Clear state on status returned.
	 */
	link->state = MTX_LINK_IDLE;

	if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
		indefinite_done(&info);

	return error;
}
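
The comment before the cpu_mfence() above explains the key point: the waiter may have observed the LINKED bit clear using plain loads, so it still needs at least a load (acquire) fence before reading data the other cpu wrote prior to releasing. A minimal C11 sketch of that hand-off (names and structure are illustrative, and the kernel's tsleep() loop is reduced to a spin):

/*
 * Minimal sketch (C11 atomics, not the kernel API) of the hand-off
 * described in the comment above.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct handoff {
	int		 data;		/* written before "unlock" */
	_Atomic bool	 linked;	/* cleared to signal completion */
};

/* Releasing side: publish data, then clear the flag. */
static void
handoff_release(struct handoff *h, int value)
{
	h->data = value;
	atomic_store_explicit(&h->linked, false, memory_order_release);
}

/* Waiting side: wait until unlinked, then fence before reading. */
static int
handoff_wait(struct handoff *h)
{
	while (atomic_load_explicit(&h->linked, memory_order_relaxed))
		;	/* in the kernel this is the tsleep() loop */

	/*
	 * The load fence the comment asks for: without it our later
	 * loads of h->data could be satisfied before the loop's loads.
	 */
	atomic_thread_fence(memory_order_acquire);

	return h->data;
}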