Example #1
File: rxe.c Project: Tyler-D/RXE
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_dev *rxe, unsigned int port_num)
{
	struct rxe_port *port = &rxe->port[port_num - 1];

	port->attr.state		= RXE_PORT_STATE;
	port->attr.max_mtu		= RXE_PORT_MAX_MTU;
	port->attr.active_mtu		= RXE_PORT_ACTIVE_MTU;
	port->attr.gid_tbl_len		= RXE_PORT_GID_TBL_LEN;
	port->attr.port_cap_flags	= RXE_PORT_PORT_CAP_FLAGS;
	port->attr.max_msg_sz		= RXE_PORT_MAX_MSG_SZ;
	port->attr.bad_pkey_cntr	= RXE_PORT_BAD_PKEY_CNTR;
	port->attr.qkey_viol_cntr	= RXE_PORT_QKEY_VIOL_CNTR;
	port->attr.pkey_tbl_len		= RXE_PORT_PKEY_TBL_LEN;
	port->attr.lid			= RXE_PORT_LID;
	port->attr.sm_lid		= RXE_PORT_SM_LID;
	port->attr.lmc			= RXE_PORT_LMC;
	port->attr.max_vl_num		= RXE_PORT_MAX_VL_NUM;
	port->attr.sm_sl		= RXE_PORT_SM_SL;
	port->attr.subnet_timeout	= RXE_PORT_SUBNET_TIMEOUT;
	port->attr.init_type_reply	= RXE_PORT_INIT_TYPE_REPLY;
	port->attr.active_width		= RXE_PORT_ACTIVE_WIDTH;
	port->attr.active_speed		= RXE_PORT_ACTIVE_SPEED;
	port->attr.phys_state		= RXE_PORT_PHYS_STATE;
	port->mtu_cap			=
				rxe_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
	port->subnet_prefix		= cpu_to_be64(RXE_PORT_SUBNET_PREFIX);

	return 0;
}
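Both rxe_init_port_param() above and rxe_set_mtu() below derive mtu_cap through rxe_mtu_enum_to_int(), which is defined elsewhere in the driver. A minimal sketch of what it plausibly looks like, assuming IB-style enum values where RXE_MTU_256 == 1 and each step doubles the payload size:

static inline int rxe_mtu_enum_to_int(enum rxe_mtu mtu)
{
	/* assumed mapping: RXE_MTU_256 -> 256, RXE_MTU_512 -> 512, ... */
	return (mtu > 0) ? 256 << ((int)mtu - 1) : -1;
}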
Example #2
File: rxe.c Project: Tyler-D/RXE
int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu,
		unsigned int port_num)
{
	struct rxe_port *port = &rxe->port[port_num - 1];
	enum rxe_mtu mtu;

	mtu = eth_mtu_int_to_enum(ndev_mtu);
	if (!mtu)
		return -EINVAL;

	/* Set the port mtu to min(feasible, preferred) */
	mtu = min_t(enum rxe_mtu, mtu, rxe->pref_mtu);

	port->attr.active_mtu = (enum ib_mtu __force)mtu;
	port->mtu_cap = rxe_mtu_enum_to_int(mtu);

	return 0;
}
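The eth_mtu_int_to_enum() helper used here is also defined elsewhere in the driver; it maps the netdev's Ethernet MTU to the largest RXE MTU enum whose payload, plus the RoCE header overhead, still fits, and returns 0 when even a 256-byte payload does not. A hedged sketch, where RXE_MAX_HDR_LENGTH and RXE_MTU_INVALID stand in for the driver's real constants:

static enum rxe_mtu eth_mtu_int_to_enum(unsigned int ndev_mtu)
{
	/* assumed constant: worst-case RoCE header + ICRC overhead */
	int avail = (int)ndev_mtu - RXE_MAX_HDR_LENGTH;

	if (avail >= 4096)
		return RXE_MTU_4096;
	else if (avail >= 2048)
		return RXE_MTU_2048;
	else if (avail >= 1024)
		return RXE_MTU_1024;
	else if (avail >= 512)
		return RXE_MTU_512;
	else if (avail >= 256)
		return RXE_MTU_256;
	else
		return RXE_MTU_INVALID;	/* rxe_set_mtu() turns this into -EINVAL */
}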
Example #3
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	/* TODO should handle error by leaving old resources intact */
	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_rd_atomic);
		if (err)
			return err;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		/* resolve the source GID for the primary path */
		rcu_read_lock();
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rcu_read_unlock();
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		qp->pri_av.network_type =
			ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr, &sgid);
	}

	if (mask & IB_QP_ALT_PATH) {
		/* same source GID lookup for the alternate path */
		rcu_read_lock();
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rcu_read_unlock();

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		qp->alt_av.network_type =
			ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr, &sgid);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = rxe_mtu_enum_to_int((enum rxe_mtu)attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* per the IBTA spec, timeout = 4.096us * 2^attr->timeout */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("set min rnr timer = 0x%x\n",
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		qp->attr.max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp state -> RESET\n");
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp state -> INIT\n");
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp state -> RTR\n");
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp state -> RTS\n");
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp state -> SQD\n");
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp state -> SQE !!?\n");
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp state -> ERR\n");
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
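rxe_qp_from_attr() is reached through the ib_modify_qp() verb, and the mask argument tells it which fields of *attr are valid. For context, a sketch of a hypothetical kernel caller moving an RC QP from RESET to INIT with the standard ib_qp_attr fields (error handling trimmed):

static int move_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	/* only the fields named in the mask are read by the driver */
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}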
Example #4
/* called by the modify qp verb, this routine
 * checks all the parameters before making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num < 1 || attr->port_num > rxe->num_ports) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap,
					       qp->srq != NULL))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH && rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
		goto err1;

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port[qp->attr.port_num - 1];
		enum rxe_mtu max_mtu = (enum rxe_mtu __force)port->attr.max_mtu;
		enum rxe_mtu mtu = (enum rxe_mtu __force)attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 rxe_mtu_enum_to_int(mtu),
				 rxe_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
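The final check bounds attr->timeout at 31 because the local ACK timeout is a 5-bit field: per the IBTA formula also used in rxe_qp_from_attr() above, a wire value t encodes 4.096us * 2^t, with 0 meaning the timer is disabled. A small hypothetical helper making that mapping explicit:

/* hypothetical: convert a 5-bit local ACK timeout value to nanoseconds */
static inline u64 rxe_timeout_to_ns(u8 timeout)
{
	return timeout ? 4096ULL << timeout : 0;	/* 0 disables the timer */
}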