Example No. 1
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
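
All of the alloc_irte() variants on this page share the same sizing idiom: a multi-entry request is rounded up to a power of two with __roundup_pow_of_two(), and ilog2() of the padded count becomes the invalidation handle mask. A minimal userspace sketch of that idiom, where roundup_p2() and log2_u32() are hypothetical stand-ins for the kernel helpers:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's __roundup_pow_of_two():
 * smallest power of two >= n (n must be >= 1). */
static unsigned int roundup_p2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Hypothetical stand-in for ilog2(): exponent of a power of two. */
static unsigned int log2_u32(unsigned int p)
{
	unsigned int e = 0;

	while (p >>= 1)
		e++;
	return e;
}

int main(void)
{
	/* A request for 5 IRTEs is padded to 8, giving mask 3,
	 * i.e. an 8-entry naturally aligned block. */
	unsigned int count = 5;
	unsigned int padded = roundup_p2(count);

	printf("count=%u padded=%u mask=%u\n", count, padded, log2_u32(padded));
	return 0;
}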
Example No. 2
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
				       unsigned long address, unsigned long pages, int ih, int gl)
{
	struct qi_desc desc;

	if (pages == -1) {
		/* For global kernel pages we have to flush them in *all* PASIDs
		 * because that's the only option the hardware gives us. Despite
		 * the fact that they are actually only accessible through one. */
		if (gl)
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
		else
			desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
		desc.high = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
		desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
			QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
	}
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.high = QI_DEV_EIOTLB_ADDR(address);
		}
		qi_submit_sync(&desc, svm->iommu);
	}
}
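
In the dev-IOTLB path above, the invalidation size is carried in the address itself: every bit below some position is forced to 1, and the hardware reads the size from the lowest zero bit of the encoded address. A standalone sketch of that encoding, assuming 4KiB pages (i.e. VTD_PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4KiB pages, like VTD_PAGE_SHIFT */

/* Round down to a power of two, i.e. keep only the highest set bit. */
static unsigned long long rounddown_p2(unsigned long long x)
{
	while (x & (x - 1))
		x &= x - 1;
	return x;
}

int main(void)
{
	/* Flush 16 pages starting at 0x12345f000. */
	unsigned long long address = 0x12345f000ULL;
	unsigned long long pages = 16;
	unsigned long long last = address + ((pages - 1) << PAGE_SHIFT);
	unsigned long long mask = rounddown_p2(address ^ last);
	unsigned long long encoded = (address & ~mask) | (mask - 1);

	/* Prints 0x12345ffff: the lowest zero bit is bit 17, so the
	 * hardware invalidates a naturally aligned 256KiB region. */
	printf("pages %#llx..%#llx -> encoded address %#llx\n",
	       address, last, encoded);
	return 0;
}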
Example No. 3
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
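
This later revision drops example 1's open-coded circular scan in favor of bitmap_find_free_region(), which finds and claims a naturally aligned run of 2^mask free bits in a single call. A toy sketch of that contract; find_free_region() below is a hypothetical simplification, not the kernel implementation:

#include <stdio.h>

#define TABLE_BITS 64

static unsigned long long table_bitmap;	/* bit set = entry in use */

/* Hypothetical simplification of bitmap_find_free_region(): find a
 * free, naturally aligned run of 2^order bits, mark it used, and
 * return its start index, or -1 if no such run exists. */
static int find_free_region(int order)
{
	int size = 1 << order;

	for (int i = 0; i < TABLE_BITS; i += size) {
		unsigned long long region = ((size == 64) ? ~0ULL
					     : ((1ULL << size) - 1)) << i;

		if (!(table_bitmap & region)) {
			table_bitmap |= region;	/* claim the whole run */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* Two 4-entry allocations land at aligned offsets 0 and 4. */
	printf("first:  %d\n", find_free_region(2));
	printf("second: %d\n", find_free_region(2));
	return 0;
}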
Example No. 4
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
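
The IB_QP_TIMEOUT branch is the one piece of real arithmetic here: the spec defines the local ACK timeout as 4.096 µs * 2^timeout, which the driver expresses in nanoseconds as 4096ULL << attr->timeout before converting to jiffies and clamping nonzero timeouts to at least one jiffy. A standalone sketch of that conversion, assuming HZ = 250 (one jiffy = 4 ms):

#include <stdio.h>

#define HZ 250	/* assumed tick rate; one jiffy = 4 ms */

/* Simplified stand-in for the kernel's nsecs_to_jiffies(). */
static unsigned long long ns_to_jiffies(unsigned long long ns)
{
	return ns / (1000000000ULL / HZ);
}

int main(void)
{
	/* e.g. timeout = 14 -> 4.096us * 2^14 = ~67ms -> 16 jiffies */
	for (int timeout = 10; timeout <= 16; timeout++) {
		unsigned long long ns = 4096ULL << timeout;
		unsigned long long j = ns_to_jiffies(ns);

		/* Like the driver, clamp a nonzero timeout to >= 1 jiffy. */
		printf("timeout=%2d -> %10llu ns -> %llu jiffies\n",
		       timeout, ns, j ? j : 1);
	}
	return 0;
}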
Example No. 5
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}
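
This oldest variant searches exactly like example 1: probe an aligned block of count entries, and on a collision jump ahead by count, wrapping modulo the table size until the search returns to start_index. A toy sketch of that circular first-fit loop; the 8-entry table is an arbitrary assumption, the real table has INTR_REMAP_TABLE_ENTRIES entries:

#include <stdio.h>

#define TABLE_ENTRIES 8

static int present[TABLE_ENTRIES] = { 1, 1, 0, 0, 1, 0, 0, 0 };

/* Circular first-fit over aligned blocks of `count` entries, mirroring
 * the do/while search in alloc_irte(). Returns a start index, or -1. */
static int find_block(int count)
{
	int index = 0, start_index = 0;

	do {
		int i;

		for (i = index; i < index + count; i++)
			if (present[i])
				break;
		if (i == index + count)
			return index;	/* found `count` free slots */

		index = (index + count) % TABLE_ENTRIES;
		if (index == start_index)
			return -1;	/* wrapped around: no room */
	} while (1);
}

int main(void)
{
	/* Slots 0, 1 and 4 are busy: a 2-entry block fits at index 2,
	 * but no aligned 4-entry block is free, so that request fails. */
	printf("2 entries -> index %d\n", find_block(2));
	printf("4 entries -> index %d\n", find_block(4));
	return 0;
}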