Example #1
static void
dmar_qi_task(void *arg, int pending __unused)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry;
	uint32_t ics;

	unit = arg;

	DMAR_LOCK(unit);
	for (;;) {
		entry = TAILQ_FIRST(&unit->tlb_flush_entries);
		if (entry == NULL)
			break;
		if (!dmar_qi_seq_processed(unit, &entry->gseq))
			break;
		TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
		dmar_domain_free_entry(entry, (entry->flags &
		    DMAR_MAP_ENTRY_QI_NF) == 0);
		DMAR_LOCK(unit);
	}
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	if (unit->inv_seq_waiters > 0)
		wakeup(&unit->inv_seq_waiters);
	DMAR_UNLOCK(unit);
}
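The task above drains unit->tlb_flush_entries in FIFO order, stopping at the first entry whose sequence number the hardware has not yet processed, and it drops DMAR_LOCK only around the per-entry free so other users of the unit lock are not held up by entry teardown. Below is a minimal userspace sketch of that drain-with-unlock pattern, using a pthread mutex and a plain TAILQ in place of the DMAR lock and map entries; flush_entry, drain_completed and hw_completed_seq are illustrative names, not driver symbols.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_entry {
	TAILQ_ENTRY(flush_entry) link;
	unsigned seq;			/* sequence number this entry waits on */
};

static TAILQ_HEAD(, flush_entry) flush_entries =
    TAILQ_HEAD_INITIALIZER(flush_entries);
static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned hw_completed_seq;	/* stands in for dmar_qi_seq_processed() */

static void
drain_completed(void)
{
	struct flush_entry *entry;

	pthread_mutex_lock(&flush_lock);
	for (;;) {
		entry = TAILQ_FIRST(&flush_entries);
		if (entry == NULL || entry->seq > hw_completed_seq)
			break;		/* nothing left, or head still pending */
		TAILQ_REMOVE(&flush_entries, entry, link);
		/* Drop the lock around the potentially slow teardown. */
		pthread_mutex_unlock(&flush_lock);
		free(entry);
		pthread_mutex_lock(&flush_lock);
	}
	pthread_mutex_unlock(&flush_lock);
}

int
main(void)
{
	struct flush_entry *e;
	unsigned i;

	for (i = 1; i <= 4; i++) {
		e = malloc(sizeof(*e));
		e->seq = i;
		TAILQ_INSERT_TAIL(&flush_entries, e, link);
	}
	hw_completed_seq = 2;		/* pretend the first two flushes completed */
	drain_completed();
	printf("queue empty: %d\n", TAILQ_EMPTY(&flush_entries));
	return (0);
}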
Example #2
static void
dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
{
	uint32_t head;
	int bytes;

	DMAR_ASSERT_LOCKED(unit);
	bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
	for (;;) {
		if (bytes <= unit->inv_queue_avail)
			break;
		/* refill */
		head = dmar_read4(unit, DMAR_IQH_REG);
		head &= DMAR_IQH_MASK;
		unit->inv_queue_avail = head - unit->inv_queue_tail -
		    DMAR_IQ_DESCR_SZ;
		if (head <= unit->inv_queue_tail)
			unit->inv_queue_avail += unit->inv_queue_size;
		if (bytes <= unit->inv_queue_avail)
			break;

		/*
		 * No space in the queue; busy-wait, since the hardware
		 * must make progress.  But first advance the tail to
		 * inform the descriptor streamer about entries we
		 * might have already filled, otherwise they could
		 * clog the whole queue.
		 */
		dmar_qi_advance_tail(unit);
		unit->inv_queue_full++;
		cpu_spinwait();
	}
	unit->inv_queue_avail -= bytes;
}
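The refill path above recomputes free space in the ring from the hardware head pointer: available bytes are head minus tail minus one descriptor (the slot kept reserved so the tail never catches up with the head), with the queue size added back when the head has wrapped behind the tail. A standalone sketch of that arithmetic follows; QI_DESCR_SZ and QI_QUEUE_SZ are assumed placeholder values, not the driver's constants.

#include <stdio.h>

#define QI_DESCR_SZ	16U		/* assumed descriptor size in bytes */
#define QI_QUEUE_SZ	4096U		/* assumed total queue size in bytes */

/*
 * Free bytes between the tail (software write pointer) and the head
 * (hardware read pointer), always keeping one descriptor slot reserved
 * so that a full queue never makes the tail equal to the head.
 */
static unsigned
qi_avail(unsigned head, unsigned tail)
{
	unsigned avail;

	avail = head - tail - QI_DESCR_SZ;
	if (head <= tail)
		avail += QI_QUEUE_SZ;
	return (avail);
}

int
main(void)
{
	/* Empty queue: everything but the reserved descriptor is free. */
	printf("%u\n", qi_avail(0, 0));		/* 4080 */
	/* Hardware consumed up to offset 2048, tail at 1024. */
	printf("%u\n", qi_avail(2048, 1024));	/* 1008 */
	/* Wrapped: tail sits ahead of head in the ring. */
	printf("%u\n", qi_avail(256, 3840));	/* 496 */
	return (0);
}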
Example #3
void
dmar_disable_qi_intr(struct dmar_unit *unit)
{
	uint32_t iectl;

	DMAR_ASSERT_LOCKED(unit);
	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
	iectl = dmar_read4(unit, DMAR_IECTL_REG);
	dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}
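Masking the completion interrupt is a plain read-modify-write: read DMAR_IECTL_REG, set the interrupt-mask bit, and write the value back, leaving every other bit untouched. Below is a small sketch of that pattern against a mock 32-bit register; the register variable and the bit position used here are illustrative, not the actual DMAR programming interface.

#include <stdint.h>
#include <stdio.h>

#define MOCK_IECTL_IM	(1U << 31)	/* interrupt-mask bit (illustrative) */

static uint32_t mock_iectl = 0x0000000f;	/* pretend other bits are in use */

static uint32_t
reg_read4(void)
{
	return (mock_iectl);
}

static void
reg_write4(uint32_t val)
{
	mock_iectl = val;
}

/* Mask the interrupt: set IM, preserve every other bit. */
static void
intr_disable(void)
{
	reg_write4(reg_read4() | MOCK_IECTL_IM);
}

/* Unmask the interrupt: clear IM, preserve every other bit. */
static void
intr_enable(void)
{
	reg_write4(reg_read4() & ~MOCK_IECTL_IM);
}

int
main(void)
{
	intr_disable();
	printf("masked:   %#x\n", (unsigned)mock_iectl);	/* 0x8000000f */
	intr_enable();
	printf("unmasked: %#x\n", (unsigned)mock_iectl);	/* 0xf */
	return (0);
}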
Example #4
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* Reads of the invalidation queue by the DMAR are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract(unit->inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
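The sizing logic above follows the IQA register encoding: the queue occupies 2^QS pages, so the tunable is clamped to the largest encodable QS value and that same value is OR'ed into the low bits of the page-aligned queue base before it is written to DMAR_IQA_REG. A standalone sketch of that computation follows; the DEF/MAX/descriptor-size constants here are assumptions based on a 3-bit QS field, not values copied from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096U
#define SKETCH_QS_DEF		3	/* assumed default: 2^3 pages = 32 KB */
#define SKETCH_QS_MAX		7	/* 3-bit QS field: at most 2^7 pages */
#define SKETCH_DESCR_SZ		16U	/* assumed descriptor size in bytes */

int
main(void)
{
	uint64_t queue_phys = 0x12345000ULL;	/* hypothetical page-aligned base */
	uint64_t queue_size, iqa;
	int qs = 9;				/* e.g. an out-of-range tunable */

	if (qs > SKETCH_QS_MAX)
		qs = SKETCH_QS_MAX;
	queue_size = (1ULL << qs) * SKETCH_PAGE_SIZE;

	/* The low bits of IQA carry the size encoding alongside the base. */
	iqa = queue_phys | (uint64_t)qs;

	printf("qs=%d size=%llu bytes avail=%llu bytes IQA=%#llx\n",
	    qs, (unsigned long long)queue_size,
	    (unsigned long long)(queue_size - SKETCH_DESCR_SZ),
	    (unsigned long long)iqa);
	return (0);
}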
Example #5
static int
dmar_disable_qi(struct dmar_unit *unit)
{
	int error;

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd &= ~DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
	    == 0));
	return (error);
}
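This version leans on the DMAR_WAIT_UNTIL() macro, which polls the given condition a bounded number of times and assigns the outcome to the enclosing local error variable (0 on success, ETIMEDOUT when the bound is exhausted); that is why error is returned here without an explicit assignment in the function body. A self-contained sketch of such a bounded-poll helper follows; the WAIT_UNTIL name, iteration bound and bare spin are illustrative, not the driver's.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define WAIT_ITER	1000000		/* illustrative poll bound */

/*
 * Bounded-poll helper in the spirit of a WAIT_UNTIL macro: evaluate the
 * condition repeatedly and report 0 on success or ETIMEDOUT if the bound
 * is exhausted before the condition becomes true.
 */
#define WAIT_UNTIL(error, cond)					\
	do {							\
		int _i;						\
		for (_i = 0; _i < WAIT_ITER && !(cond); _i++)	\
			;	/* cpu_spinwait() equivalent */	\
		(error) = (_i == WAIT_ITER) ? ETIMEDOUT : 0;	\
	} while (0)

static volatile bool hw_done;

static int
wait_for_hw(void)
{
	int error;

	WAIT_UNTIL(error, hw_done);
	return (error);
}

int
main(void)
{
	hw_done = false;
	printf("not done: %d (ETIMEDOUT=%d)\n", wait_for_hw(), ETIMEDOUT);
	hw_done = true;
	printf("done:     %d\n", wait_for_hw());
	return (0);
}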
Example #6
static int
dmar_disable_qi(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd &= ~DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) != 0)
		cpu_spinwait();
	return (0);
}
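The older busy-wait variant uses the usual GCMD/GSTS handshake: clear the enable bit in the cached command word, write it to DMAR_GCMD_REG, then spin until the hardware drops the matching status bit in DMAR_GSTS_REG (the XXXKIB note flags the missing timeout). Below is a mock-register sketch of that handshake; the bit positions and the instantly-responding fake hardware are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MOCK_CMD_QIE	(1U << 26)	/* illustrative command enable bit */
#define MOCK_STS_QIES	(1U << 26)	/* illustrative status bit */

static uint32_t mock_gcmd;
static uint32_t mock_gsts;

/* Pretend hardware: the status bit tracks the command bit after a write. */
static void
reg_write_gcmd(uint32_t val)
{
	mock_gcmd = val;
	if ((val & MOCK_CMD_QIE) != 0)
		mock_gsts |= MOCK_STS_QIES;
	else
		mock_gsts &= ~MOCK_STS_QIES;
}

static uint32_t
reg_read_gsts(void)
{
	return (mock_gsts);
}

static uint32_t cached_gcmd = MOCK_CMD_QIE;	/* driver's shadow of GCMD */

static void
disable_qi_sketch(void)
{
	cached_gcmd &= ~MOCK_CMD_QIE;
	reg_write_gcmd(cached_gcmd);
	/* Poll until the hardware reports the queued-invalidation unit off. */
	while ((reg_read_gsts() & MOCK_STS_QIES) != 0)
		;	/* cpu_spinwait() in the real driver */
}

int
main(void)
{
	reg_write_gcmd(cached_gcmd);		/* start "enabled" */
	disable_qi_sketch();
	printf("QIES after disable: %d\n",
	    (reg_read_gsts() & MOCK_STS_QIES) != 0);
	return (0);
}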