Example #1
0
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
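	/*
	 * Linear scan: look for 'count' consecutive free entries starting
	 * at 'index', wrapping around the table, and give up once the
	 * search comes back around to 'start_index'.
	 */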
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
Example #2
0
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
Example #3
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}
Example #4
0
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
Example #5
0
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
Example #6
0
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
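	/*
	 * Bind this IRQ to an IRTE block allocated elsewhere: the entry used
	 * for it is 'index' + 'subhandle'.
	 */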

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
Example #7
0
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}
Example #8
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
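	/*
	 * bitmap_find_free_region() claims a naturally aligned block of
	 * 2^mask entries and returns its starting index, or a negative
	 * value if no such block is available.
	 */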
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
Example #9
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
Example #10
0
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	/* Release the IRTE(s) backing this IRQ's mapping. */
	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
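/*
 * The per-IRQ irq_2_iommu data already exists by the time this is called,
 * so "allocation" is just a lookup of that structure.
 */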
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}