Example #1
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		     struct rdma_ah_attr *attr)
{
	memset(av, 0, sizeof(*av));
	memcpy(&av->grh, rdma_ah_read_grh(attr),
	       sizeof(*rdma_ah_read_grh(attr)));
	av->port_num = port_num;
}
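Every example on this page goes through rdma_ah_read_grh() to reach the GRH portion of an address-handle attribute. The accessor itself is a trivial const inline from include/rdma/ib_verbs.h; paraphrased here (the exact formatting in the header may differ):

static inline const struct ib_global_route *
rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

Because it returns a pointer to a const struct ib_global_route, sizeof(*rdma_ah_read_grh(attr)) in Example #1 is simply sizeof(struct ib_global_route), so the memcpy() copies the whole GRH in one step.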
Example #2
File: ah.c Project: krzk/linux
static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_ah *ah,
				  struct rdma_ah_attr *ah_attr)
{
	enum ib_gid_type gid_type;
	int err;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

		memcpy(ah->av.rgid, &grh->dgid, 16);
		ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label |
						(1 << 30) |
						grh->sgid_index << 20);
		ah->av.hop_limit = grh->hop_limit;
		ah->av.tclass = grh->traffic_class;
	}

	ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
					     ah_attr->grh.sgid_index,
					     &gid_type);
		if (err)
			return ERR_PTR(err);

		memcpy(ah->av.rmac, ah_attr->roce.dmac,
		       sizeof(ah_attr->roce.dmac));
		ah->av.udp_sport =
		mlx5_get_roce_udp_sport(dev,
					rdma_ah_get_port_num(ah_attr),
					rdma_ah_read_grh(ah_attr)->sgid_index);
		ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
#define MLX5_ECN_ENABLED BIT(1)
		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			ah->av.tclass |= MLX5_ECN_ENABLED;
	} else {
		ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
		ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
		ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
	}

	return &ah->ibah;
}
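The grh_gid_fl word packs three values into one big-endian 32-bit field: the 20-bit flow label in bits 0..19, the SGID index starting at bit 20, and bit 30 as a GRH-valid flag. A minimal sketch of the reverse mapping, operating on a value already converted with be32_to_cpu() — the 7-bit width assumed for the SGID index is an illustration, not taken from the mlx5 headers:

/* Hypothetical helper: unpack the fields packed into av.grh_gid_fl
 * above (input already in CPU byte order).
 */
static void unpack_grh_gid_fl(u32 grh_gid_fl, u32 *flow_label,
			      u8 *sgid_index, bool *grh_valid)
{
	*flow_label = grh_gid_fl & 0xfffff;        /* bits 0..19  */
	*sgid_index = (grh_gid_fl >> 20) & 0x7f;   /* bits 20..26, assumed width */
	*grh_valid  = !!(grh_gid_fl & (1u << 30)); /* bit 30      */
}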
Example #3
void rxe_av_fill_ip_info(struct rxe_av *av,
			struct rdma_ah_attr *attr,
			struct ib_gid_attr *sgid_attr,
			union ib_gid *sgid)
{
	rdma_gid2ip(&av->sgid_addr._sockaddr, sgid);
	rdma_gid2ip(&av->dgid_addr._sockaddr, &rdma_ah_read_grh(attr)->dgid);
	av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
}
Example #4
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{
	const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;

	rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
	rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
		    &rdma_ah_read_grh(attr)->dgid);
	av->network_type = rdma_gid_attr_network_type(sgid_attr);
}
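Examples #3 and #4 are the same rxe helper before and after an API change. The older version expects the caller to resolve the source GID and its attributes and pass them in; the newer one reads the reference-counted const struct ib_gid_attr cached in attr->grh.sgid_attr and derives the network type with rdma_gid_attr_network_type(). Only the destination GID is still fetched through rdma_ah_read_grh().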
Example #5
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		     struct rdma_ah_attr *attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(attr);

	memset(av, 0, sizeof(*av));
	memcpy(av->grh.dgid.raw, grh->dgid.raw, sizeof(grh->dgid.raw));
	av->grh.flow_label = grh->flow_label;
	av->grh.sgid_index = grh->sgid_index;
	av->grh.hop_limit = grh->hop_limit;
	av->grh.traffic_class = grh->traffic_class;
	av->port_num = port_num;
}
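In contrast to the single memcpy() of Example #1, this variant copies the GRH field by field. The copied set (dgid, flow_label, sgid_index, hop_limit, traffic_class) is explicit, and rxe_av no longer depends on struct ib_global_route keeping the same size and layout.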
Example #6
struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
	const struct ib_gid_attr *gid_attr;
	struct device *dev = hr_dev->dev;
	struct hns_roce_ah *ah;
	u16 vlan_tag = 0xffff;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	bool vlan_en = false;

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	/* Get mac address */
	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);

	gid_attr = ah_attr->grh.sgid_attr;
	if (is_vlan_dev(gid_attr->ndev)) {
		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
		vlan_en = true;
	}

	if (vlan_tag < 0x1000)
		vlan_tag |= (rdma_ah_get_sl(ah_attr) &
			     HNS_ROCE_VLAN_SL_BIT_MASK) <<
			     HNS_ROCE_VLAN_SL_SHIFT;

	ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
				     (rdma_ah_get_port_num(ah_attr) <<
				     HNS_ROCE_PORT_NUM_SHIFT));
	ah->av.gid_index = grh->sgid_index;
	ah->av.vlan = cpu_to_le16(vlan_tag);
	ah->av.vlan_en = vlan_en;
	dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
		ah->av.vlan);

	if (rdma_ah_get_static_rate(ah_attr))
		ah->av.stat_rate = IB_RATE_10_GBPS;

	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
	ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
						 HNS_ROCE_SL_SHIFT);

	return &ah->ibah;
}
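When the egress netdev is a VLAN device, the driver folds the service level into the priority (PCP) bits of the 16-bit VLAN TCI, whose low 12 bits hold the VLAN ID. Assuming HNS_ROCE_VLAN_SL_BIT_MASK is 0x7 and HNS_ROCE_VLAN_SL_SHIFT is 13, matching the standard 802.1Q TCI layout (both values are assumptions here, check the driver headers), the packing reduces to:

/* Sketch: fold a 3-bit SL into the PCP field of a VLAN TCI.
 * Mask and shift are assumptions following the 802.1Q layout.
 */
static u16 pack_sl_into_vlan_tci(u16 vlan_id, u8 sl)
{
	return (vlan_id & 0x0fff) | ((u16)(sl & 0x7) << 13);
}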
Example #7
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{
	struct rxe_port *port;

	port = &rxe->port;

	if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
		u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index;

		if (sgid_index >= port->attr.gid_tbl_len) {
			pr_warn("invalid sgid index = %d\n", sgid_index);
			return -EINVAL;
		}
	}

	return 0;
}
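Valid SGID table slots run from 0 to gid_tbl_len - 1, so an index equal to gid_tbl_len is already out of range; the comparison has to be >= rather than > to reject it.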
Example #8
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    grh->dgid.global.subnet_prefix,
			    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if ((be16_to_cpu(hdr->lrh[3]) !=
		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
		    ppd_from_ibp(ibp)->port !=
			    rdma_ah_get_port_num(&qp->alt_ah_attr))
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		/*
		 * No migration: the same GRH, P_Key and SLID checks are
		 * made against the primary path (qp->remote_ah_attr);
		 * the original listing is truncated at this point.
		 */
	}

	return 0;

err:
	return 1;
}
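Depending on whether a path-migration request is armed, the function validates the incoming packet against either the alternate or the primary path attributes: GRH presence must match the path's IB_AH_GRH flag, both GIDs must pass gid_ok(), the P_Key must verify, and the SLID and arrival port must match. A nonzero return tells the caller to drop the packet.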
Example #9
/**
 * pvrdma_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 * @udata: user data blob
 *
 * @return: the ib_ah pointer on success, otherwise errno.
 */
struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_ah *ah;
	const struct ib_global_route *grh;
	u8 port_num = rdma_ah_get_port_num(ah_attr);

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	grh = rdma_ah_read_grh(ah_attr);
	if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)  ||
	    rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw))
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
		return ERR_PTR(-ENOMEM);

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah) {
		atomic_dec(&dev->num_ahs);
		return ERR_PTR(-ENOMEM);
	}

	ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24);
	ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr);
	ah->av.src_path_bits |= 0x80;
	ah->av.gid_index = grh->sgid_index;
	ah->av.hop_limit = grh->hop_limit;
	ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) |
				      grh->flow_label;
	memcpy(ah->av.dgid, grh->dgid.raw, 16);
	memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN);

	ah->ibah.device = pd->device;
	ah->ibah.pd = pd;
	ah->ibah.uobject = NULL;

	return &ah->ibah;
}
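Two details are worth noting here: atomic_add_unless() only increments the AH count while it is below dev->dsr->caps.max_ah, implementing a lock-free resource cap that fails with -ENOMEM when the device limit is reached, and sl_tclass_flowlabel places the 8-bit traffic class at bits 20..27, directly above the 20-bit flow label.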
Example #10
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}
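This is the older rxe flow: ib_get_cached_gid() copies the GID and its attributes into caller-owned storage and takes a reference on the backing netdev, which is why sgid_attr.ndev must be released with dev_put() on every exit path. Later kernels replace this with the reference-counted const struct ib_gid_attr shown in Example #4.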
Example #11
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;

	bool has_udp = false;
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       rdma_ah_get_port_num(ah_attr),
		       grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       grh->sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = grh->traffic_class;
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is untouched hence is zero */
	}
	return 0;
}
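The wire format is chosen from two properties of the source GID: its gid_type (RoCE v1 versus v2, i.e. whether the payload is carried in UDP) and, for v2, whether the GID is an IPv4-mapped IPv6 address. That yields three encapsulations: Ethertype ETH_P_IBOE with a raw GRH for RoCE v1, IPv4 plus UDP for v2 with a v4-mapped GID, and IPv6 plus UDP otherwise. In the two IP cases the GRH fields map onto the equivalent IPv6 or IPv4 header fields further down.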
Example #12
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
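The timeout conversion follows the IBTA formula of 4.096 us * 2^timeout: for attr->timeout = 14, for instance, 4096ULL << 14 is 67,108,864 ns, roughly 67 ms, which nsecs_to_jiffies() then rounds to the timer granularity. The j ? j : 1 floor ensures a small nonzero timeout never collapses to zero jiffies, which would disable the timer entirely.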