Example #1
static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	int ret;

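	/*
	 * Destination MAC: derived from the GID for multicast destinations,
	 * otherwise taken from the DMAC already resolved into ah_attr.
	 */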
	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret)
		return ERR_PTR(ret);
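	/*
	 * The source MAC and VLAN come from the netdev backing the cached
	 * source GID, when one is associated with the entry.
	 */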
	eth_zero_addr(ah->av.eth.s_mac);
	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(gid_attr.ndev);
	}
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
	if (ret < 0)
		return ERR_PTR(ret);
	ah->av.eth.gid_index = ret;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
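	/* Clamp the static rate down to the nearest rate the device supports. */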
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}
Example #2
static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
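	/*
	 * The cache reports -EAGAIN for an invalid entry; return the zero
	 * GID instead of failing the query.
	 */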
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}
Example #3
static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	union ib_gid sgid;
	u8 mac[6];
	int err;
	int is_mcast;
	u16 vlan_tag;

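	/*
	 * Resolve the destination MAC and whether the GRH addresses a
	 * multicast group.
	 */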
	err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
	if (err)
		return ERR_PTR(err);

	memcpy(ah->av.eth.mac, mac, 6);
	err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
	if (err)
		return ERR_PTR(err);
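	/*
	 * With this older API the VLAN id is recovered from the source GID
	 * itself; the SL is then folded into the tag's priority bits.
	 */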
	vlan_tag = rdma_get_vlan_id(&sgid);
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.eth.gid_index = ah_attr->grh.sgid_index;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}
Example #4
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

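	/*
	 * Look up the cached source GID and its attributes; they carry the
	 * IP addressing information copied into the AV below.
	 */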
	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}
Example #5
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

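	/* Primary path: rebuild the AV from the cached source GID on port 1. */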
	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

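	/* Alternate path: same lookup for the alternate AH attributes. */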
	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
Example #6
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;

	bool has_udp = false;
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

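	/*
	 * The cached source GID and its attributes determine VLAN presence
	 * and the RoCE flavour used for the rest of the header.
	 */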
	rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       rdma_ah_get_port_num(ah_attr),
		       grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       grh->sgid_index);
		return -ENOENT;
	}

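	/*
	 * The GID type selects RoCE v1, RoCE v2 over IPv4 (for a v4-mapped
	 * GID) or RoCE v2 over IPv6.
	 */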
	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is untouched hence is zero */
	}
	return 0;
}
Example #7
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
			       struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	struct ib_gid_attr sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	union ib_gid sgid;

	if (!(attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

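	/*
	 * The cached SGID entry supplies the VLAN (via its netdev) and,
	 * through its GID type, the L3 header type for this AH.
	 */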
	status = ib_get_cached_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid,
				   &sgid_attr);
	if (status) {
		pr_err("%s(): Failed to query sgid, status = %d\n",
		      __func__, status);
		goto av_conf_err;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}
	/* Get network header type for this GID */
	ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);

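	/*
	 * For a user-space AH to a routable unicast GID, resolve the
	 * destination MAC and VLAN from the GRH.
	 */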
	if ((pd->uctx) &&
	    (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
	    (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
		status = rdma_addr_find_l2_eth_by_grh(&sgid, &attr->grh.dgid,
						      attr->dmac, &vlan_tag,
						      &sgid_attr.ndev->ifindex,
						      NULL);
		if (status) {
			pr_err("%s(): Failed to resolve dmac from gid." 
				"status = %d\n", __func__, status);
			goto av_conf_err;
		}
	}

	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}