Example #1
static void
dmar_qi_task(void *arg, int pending __unused)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry;
	uint32_t ics;

	unit = arg;

	DMAR_LOCK(unit);
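	/*
	 * Reap map entries whose queued invalidations the hardware has
	 * completed, dropping the lock around each free.
	 */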
	for (;;) {
		entry = TAILQ_FIRST(&unit->tlb_flush_entries);
		if (entry == NULL)
			break;
		if (!dmar_qi_seq_processed(unit, &entry->gseq))
			break;
		TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
		dmar_domain_free_entry(entry, (entry->flags &
		    DMAR_MAP_ENTRY_QI_NF) == 0);
		DMAR_LOCK(unit);
	}
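	/* Clear the invalidation wait completion status (write 1 to clear). */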
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
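	/* Wake threads sleeping in dmar_qi_wait_for_seq(). */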
	if (unit->inv_seq_waiters > 0)
		wakeup(&unit->inv_seq_waiters);
	DMAR_UNLOCK(unit);
}
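
For reference, the loop above keys off dmar_qi_seq_processed(), which is
not shown in this example. A minimal sketch of such a predicate, assuming
inv_waitd_seq_hw holds the sequence number most recently written back by
the hardware and inv_waitd_gen counts 32-bit wraparounds of it:

/*
 * Sketch: a recorded generation/sequence pair is complete once the
 * hardware write-back has reached or passed it.
 */
static bool
dmar_qi_seq_processed(const struct dmar_unit *unit,
    const struct dmar_qi_genseq *pseq)
{

	return (pseq->gen < unit->inv_waitd_gen ||
	    (pseq->gen == unit->inv_waitd_gen &&
	    pseq->seq <= unit->inv_waitd_seq_hw));
}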
Example #2
void
dmar_fini_qi(struct dmar_unit *unit)
{
	struct dmar_qi_genseq gseq;

	if (!unit->qi_enabled)
		return;
	taskqueue_drain(unit->qi_taskqueue, &unit->qi_task);
	taskqueue_free(unit->qi_taskqueue);
	unit->qi_taskqueue = NULL;

	DMAR_LOCK(unit);
	/* quiesce */
	dmar_qi_ensure(unit, 1);
	dmar_qi_emit_wait_seq(unit, &gseq, true);
	dmar_qi_advance_tail(unit);
	dmar_qi_wait_for_seq(unit, &gseq, false);
	/* only after the quiesce, disable queue */
	dmar_disable_qi_intr(unit);
	dmar_disable_qi(unit);
	KASSERT(unit->inv_seq_waiters == 0,
	    ("dmar%d: waiters on disabled queue", unit->unit));
	DMAR_UNLOCK(unit);

	kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size);
	unit->inv_queue = 0;
	unit->inv_queue_size = 0;
	unit->qi_enabled = 0;
}
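
For reference, the taskqueue drained above is normally fed from the QI
completion interrupt. A minimal sketch of such a handler, assuming it is
the routine gated by dmar_enable_qi_intr()/dmar_disable_qi_intr():

/* Sketch: defer all completion processing to dmar_qi_task(). */
static void
dmar_qi_intr(void *arg)
{
	struct dmar_unit *unit;

	unit = arg;
	KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
	taskqueue_enqueue_fast(unit->qi_taskqueue, &unit->qi_task);
}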
Example #3
static int
dmar_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct dmar_unit *unit;
	struct dmar_msi_data *dmd;
	uint64_t msi_addr;
	uint32_t msi_data;
	int i, error;

	unit = device_get_softc(dev);
	for (i = 0; i < DMAR_INTR_TOTAL; i++) {
		dmd = &unit->intrs[i];
		if (irq == dmd->irq) {
			error = PCIB_MAP_MSI(device_get_parent(
			    device_get_parent(dev)),
			    dev, irq, &msi_addr, &msi_data);
			if (error != 0)
				return (error);
			DMAR_LOCK(unit);
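			/* Reprogram the MSI registers with the interrupt disabled. */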
			(dmd->disable_intr)(unit);
			dmar_write4(unit, dmd->msi_data_reg, msi_data);
			dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
			dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
			(dmd->enable_intr)(unit);
			DMAR_UNLOCK(unit);
			return (0);
		}
	}
	return (ENOENT);
}
Example #4
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* The invalidation queue reads by DMARs are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
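	/*
	 * The IQA register takes the physical base address of the queue;
	 * its low bits encode the queue size as a power-of-two count of
	 * 4KB pages.
	 */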
	iqa = pmap_kextract(unit->inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
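
After initialization, descriptors are handed to the hardware by copying
them into inv_queue and then publishing the new tail offset. A minimal
sketch of the publishing step, assuming inv_queue_tail tracks the byte
offset of the next free descriptor slot:

/* Sketch: make queued descriptors visible to the hardware. */
static void
dmar_qi_advance_tail(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail);
}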