Example #1
static void cq_event_callback(struct ehca_shca *shca,
			      u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);

	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);

	return;
}
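
The nr_events counter and wait_completion wait queue above only pay off together with a destroy path that drains in-flight events before the CQ goes away. That counterpart is not among these examples; a minimal sketch of its expected shape follows (the token field name and exact locking are assumptions, not verified source):

/* Hedged sketch, not the driver's actual destroy path: unpublish the CQ
 * from the idr so no new event callback can take a reference, then wait
 * until every callback that incremented nr_events has dropped it again. */
static void example_drain_cq_events(struct ehca_cq *cq)
{
	write_lock_irq(&ehca_cq_idr_lock);
	idr_remove(&ehca_cq_idr, cq->token);	/* assumed field name */
	write_unlock_irq(&ehca_cq_idr_lock);

	wait_event(cq->wait_completion, !atomic_read(&cq->nr_events));
}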
Example #2
void ehca_tasklet_neq(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca *)data;
	struct ehca_eqe *eqe;
	u64 ret;

	eqe = ehca_poll_eq(shca, &shca->neq);

	while (eqe) {
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

		eqe = ehca_poll_eq(shca, &shca->neq);
	}

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");

	return;
}
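
The data argument here is the unsigned long cookie passed to tasklet_init(), which is why the handler's first statement casts it back to the struct ehca_shca pointer. A hedged sketch of the init-time wiring (the tasklet field name is illustrative, not taken from these examples):

/* Hedged sketch of the registration; the field name interrupt_task is
 * an assumption for illustration only. */
tasklet_init(&shca->neq.interrupt_task, ehca_tasklet_neq,
	     (unsigned long)shca);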
Example #3
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
		      struct ib_udata *uhw)
{
	int i, ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_hca *rblock;

	static const u32 cap_mapping[] = {
		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
	};

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto query_device1;
	}

	memset(props, 0, sizeof(struct ib_device_attr));
	props->page_size_cap   = shca->hca_cap_mr_pgsize;
	props->fw_ver          = rblock->hw_ver;
	props->max_mr_size     = rblock->max_mr_size;
	props->vendor_id       = rblock->vendor_id >> 8;
	props->vendor_part_id  = rblock->vendor_part_id >> 16;
	props->hw_ver          = rblock->hw_ver;
	props->max_qp          = limit_uint(rblock->max_qp);
	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
	props->max_sge         = limit_uint(rblock->max_sge);
	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
	props->max_cq          = limit_uint(rblock->max_cq);
	props->max_cqe         = limit_uint(rblock->max_cqe);
	props->max_mr          = limit_uint(rblock->max_mr);
	props->max_mw          = limit_uint(rblock->max_mw);
	props->max_pd          = limit_uint(rblock->max_pd);
	props->max_ah          = limit_uint(rblock->max_ah);
	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
	props->max_rdd         = limit_uint(rblock->max_rd_domain);
	props->max_fmr         = limit_uint(rblock->max_mr);
	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		props->max_srq         = limit_uint(props->max_qp);
		props->max_srq_wr      = limit_uint(props->max_qp_wr);
		props->max_srq_sge     = 3;
	}

	props->max_pkeys           = 16;
	/* Some FW versions say 0 here; insert sensible value in that case */
	props->local_ca_ack_delay  = rblock->local_ca_ack_delay ?
		min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
	props->max_total_mcast_qp_attach
		= limit_uint(rblock->max_total_mcast_qp_attach);

	/* translate device capabilities */
	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
			props->device_cap_flags |= cap_mapping[i];

query_device1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
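
limit_uint() is used above but not defined in any of these examples. Comparing this version with the older Example #10 below, where the same rblock fields are clamped with min_t(int, ..., INT_MAX), suggests it is a one-line clamping helper; a plausible sketch (an inference, not verified source):

/* Hedged sketch of limit_uint(), inferred from Example #10, which clamps
 * the same firmware fields with min_t(int, value, INT_MAX). */
static unsigned int limit_uint(unsigned int value)
{
	return min_t(unsigned int, value, INT_MAX);
}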
Example #4
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value, ret;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	}
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}
	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
Example #5
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			/* only replay modify_qp calls in autodetect mode;
			 * if AQP1 was destroyed, the port is already down
			 * again and we can drop the event.
			 */
			if (ehca_nr_ports < 0)
				if (replay_modify_qp(sport))
					break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port, &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Traced stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}
Example #6
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02: /* path migrated */
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03: /* communication established */
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04: /* send queue drained */
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05: /* QP error */
	case 0x06: /* QP error */
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07: /* CQ error */
	case 0x08: /* CQ error */
		cq_event_callback(shca, eqe);
		break;
	case 0x09: /* MRMWPTE error */
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A: /* port event */
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B: /* MR access error */
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C: /* EQ error */
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D: /* P/Q_Key mismatch */
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10: /* sampling complete */
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11: /* unaffiliated access error */
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12: /* path migrating */
		ehca_err(&shca->ib_device, "Path migrating.");
		break;
	case 0x13: /* interface trace stopped */
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14: /* first error capture info available */
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15: /* SRQ limit reached */
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}

	return;
}
Example #7
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];
	unsigned long flags;

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			int suppress_event;
			/* replay modify_qp for sqps */
			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
			if (sport->ibqp_sqp[IB_QPT_SMI])
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
			if (!suppress_event)
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);

			/* AQP1 was destroyed, ignore this event */
			if (suppress_event)
				break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Traced stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}
Example #8
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,	           /* r4  */
				allocate_controls,	           /* r5  */
				parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);

			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,     /* r4 */
						parms->qp_handle.handle,   /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}
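
EHCA_BMASK_SET() and EHCA_BMASK_GET() do all the hypercall register packing above but are not defined in these examples. They act as 64-bit field pack/extract helpers keyed by a mask descriptor; a self-contained model of that behavior (a sketch, not the driver's actual macros):

/* Hedged model of the pack/extract behavior, not the real macros.
 * A field descriptor carries a shift position and a bit width. */
#include <stdint.h>

struct field_desc { unsigned int shift, width; };

static uint64_t field_set(struct field_desc f, uint64_t value)
{
	uint64_t mask = (f.width >= 64) ? ~0ULL : (1ULL << f.width) - 1;
	return (value & mask) << f.shift;	/* place value in its field */
}

static uint64_t field_get(struct field_desc f, uint64_t reg)
{
	uint64_t mask = (f.width >= 64) ? ~0ULL : (1ULL << f.width) - 1;
	return (reg >> f.shift) & mask;		/* pull value out of its field */
}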
Example #9
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_qp *qp,
			     struct ehca_alloc_qp_parms *parms)
{
	u64 ret;
	u64 allocate_controls;
	u64 max_r10_reg;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
	u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
	int daqp_ctrl = parms->daqp_ctrl;

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
			       (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       max_nr_send_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 max_nr_receive_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->max_send_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->max_recv_sge);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,	           /* r4  */
				allocate_controls,	           /* r5  */
				qp->send_cq->ipz_cq_handle.handle,
				qp->recv_cq->ipz_cq_handle.handle,
				parms->ipz_eq_handle.handle,
				((u64)qp->token << 32) | parms->pd.value,
				max_r10_reg,	                   /* r10 */
				parms->ud_av_l_key_ctl,            /* r11 */
				0);
	qp->ipz_qp_handle.handle = outs[0];
	qp->real_qp_num = (u32)outs[1];
	parms->act_nr_send_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->act_nr_recv_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->act_nr_send_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->act_nr_recv_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->nr_sq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->nr_rq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS)
		hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lx", ret);

	return ret;
}
Example #10
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	int i, ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_hca *rblock;

	static const u32 cap_mapping[] = {
		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
	};

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto query_device1;
	}

	memset(props, 0, sizeof(struct ib_device_attr));
	props->fw_ver          = rblock->hw_ver;
	props->max_mr_size     = rblock->max_mr_size;
	props->vendor_id       = rblock->vendor_id >> 8;
	props->vendor_part_id  = rblock->vendor_part_id >> 16;
	props->hw_ver          = rblock->hw_ver;
	props->max_qp          = min_t(int, rblock->max_qp, INT_MAX);
	props->max_qp_wr       = min_t(int, rblock->max_wqes_wq, INT_MAX);
	props->max_sge         = min_t(int, rblock->max_sge, INT_MAX);
	props->max_sge_rd      = min_t(int, rblock->max_sge_rd, INT_MAX);
	props->max_cq          = min_t(int, rblock->max_cq, INT_MAX);
	props->max_cqe         = min_t(int, rblock->max_cqe, INT_MAX);
	props->max_mr          = min_t(int, rblock->max_mr, INT_MAX);
	props->max_mw          = min_t(int, rblock->max_mw, INT_MAX);
	props->max_pd          = min_t(int, rblock->max_pd, INT_MAX);
	props->max_ah          = min_t(int, rblock->max_ah, INT_MAX);
	props->max_fmr         = min_t(int, rblock->max_mr, INT_MAX);

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		props->max_srq         = props->max_qp;
		props->max_srq_wr      = props->max_qp_wr;
		props->max_srq_sge     = 3;
	}

	props->max_pkeys       = 16;
	props->local_ca_ack_delay
		= rblock->local_ca_ack_delay;
	props->max_raw_ipv6_qp
		= min_t(int, rblock->max_raw_ipv6_qp, INT_MAX);
	props->max_raw_ethy_qp
		= min_t(int, rblock->max_raw_ethy_qp, INT_MAX);
	props->max_mcast_grp
		= min_t(int, rblock->max_mcast_grp, INT_MAX);
	props->max_mcast_qp_attach
		= min_t(int, rblock->max_mcast_qp_attach, INT_MAX);
	props->max_total_mcast_qp_attach
		= min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);

	/* translate device capabilities */
	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
			props->device_cap_flags |= cap_mapping[i];

query_device1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
Example #11
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	struct ib_event event;
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			ehca_info(&shca->ib_device,
				  "port %x is active.", port);
			event.device = &shca->ib_device;
			event.event = IB_EVENT_PORT_ACTIVE;
			event.element.port_num = port;
			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			ib_dispatch_event(&event);
		} else {
			ehca_info(&shca->ib_device,
				  "port %x is inactive.", port);
			event.device = &shca->ib_device;
			event.event = IB_EVENT_PORT_ERR;
			event.element.port_num = port;
			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			ib_dispatch_event(&event);
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		ehca_warn(&shca->ib_device,
			  "disruptive port %x configuration change", port);

		ehca_info(&shca->ib_device,
			  "port %x is inactive.", port);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port;
		shca->sport[port - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);

		ehca_info(&shca->ib_device,
			  "port %x is active.", port);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ACTIVE;
		event.element.port_num = port;
		shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
		ib_dispatch_event(&event);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Trace stopped.");
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}