bool dst_entry::conf_l2_hdr_and_snd_wqe_eth()
{
        bool ret_val = false;

        // We may be here after an invalidation, so free the WQE handler
        // since we are going to rebuild it from scratch.
        if (m_p_send_wqe_handler) {
                delete m_p_send_wqe_handler;
                m_p_send_wqe_handler = NULL;
        }

        m_p_send_wqe_handler = new wqe_send_handler();
        if (!m_p_send_wqe_handler) {
                dst_logpanic("%s Failed to allocate send WQE handler", to_str().c_str());
        }

        m_p_send_wqe_handler->init_inline_wqe(m_inline_send_wqe, get_sge_lst_4_inline_send(), get_inline_sge_num());
        m_p_send_wqe_handler->init_wqe(m_not_inline_send_wqe, get_sge_lst_4_not_inline_send(), 1);

        net_device_val_eth *netdevice_eth = dynamic_cast<net_device_val_eth*>(m_p_net_dev_val);
        BULLSEYE_EXCLUDE_BLOCK_START
        if (netdevice_eth) {
        BULLSEYE_EXCLUDE_BLOCK_END
                const L2_address *src = m_p_net_dev_val->get_l2_address();
                const L2_address *dst = m_p_neigh_val->get_l2_address();

                BULLSEYE_EXCLUDE_BLOCK_START
                if (src && dst) {
                BULLSEYE_EXCLUDE_BLOCK_END
                        if (netdevice_eth->get_vlan()) { // VLAN interface
                                m_header.configure_vlan_eth_headers(*src, *dst, netdevice_eth->get_vlan());
                        } else {
                                m_header.configure_eth_headers(*src, *dst);
                        }
                        init_sge();
                        ret_val = true;
                } else {
                        // Error path: log and fall through with ret_val still false.
                        dst_logerr("Can't build proper L2 header, L2 address is not available");
                }
        } else {
                dst_logerr("Dynamic cast to net_device_val_eth failed, can't build proper L2 header");
        }

        return ret_val;
}
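For reference, the VLAN branch above chooses between a plain 14-byte Ethernet header (configure_eth_headers) and an 18-byte 802.1Q-tagged one (configure_vlan_eth_headers). A minimal sketch of the two wire layouts, using standard Ethernet framing rather than libvma's header class; the struct names here are illustrative, not libvma's:

#include <stdint.h>

#define ETH_ALEN        6
#define ETH_P_8021Q     0x8100  /* TPID marking an 802.1Q VLAN tag */

/* Plain Ethernet header: 14 bytes on the wire. */
struct eth_hdr_sketch {
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
        uint16_t ethertype;         /* network byte order */
} __attribute__((packed));

/* 802.1Q-tagged header: 18 bytes; the 4-byte tag sits between the
 * source MAC and the original ethertype. */
struct vlan_eth_hdr_sketch {
        uint8_t  dst[ETH_ALEN];
        uint8_t  src[ETH_ALEN];
        uint16_t tpid;              /* ETH_P_8021Q, network byte order */
        uint16_t tci;               /* priority(3) | DEI(1) | VLAN id(12) */
        uint16_t ethertype;
} __attribute__((packed));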
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_rwq *wq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        do {
                if (unlikely(tail == wq->head)) {
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ret = 0;
                        goto bail;
                }
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
        } while (!wr_id_only && !init_sge(qp, wqe));
        qp->r_wr_id = wqe->wr_id;
        wq->tail = tail;

        ret = 1;
        qp->r_wrid_valid = 1;
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&rq->lock, flags);

bail:
        return ret;
}
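Both this function and the hfi1 variant below recompute the number of receive WQEs still posted with the same modular arithmetic before comparing against srq->limit. A self-contained sketch of just that computation, assuming head and tail have already been clamped to [0, size) as in the driver code; the name ring_entries is ours, not the driver's:

#include <assert.h>
#include <stdint.h>

/*
 * Entries outstanding in a circular queue: head is the producer index,
 * tail the consumer index. This mirrors the value "n" computed before
 * the "n < srq->limit" check above.
 */
static uint32_t ring_entries(uint32_t head, uint32_t tail, uint32_t size)
{
        assert(head < size && tail < size);
        if (head < tail)
                return head + (size - tail);    /* producer has wrapped */
        return head - tail;                     /* no wrap */
}

When this count falls below srq->limit, the drivers disarm the limit (srq->limit = 0) and deliver a single IB_EVENT_SRQ_LIMIT_REACHED to the SRQ's event handler after dropping the queue lock.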
/**
 * hfi1_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct hfi1_rq *rq;
        struct hfi1_rwq *wq;
        struct hfi1_srq *srq;
        struct hfi1_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(HFI1_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}
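Stripped of locking, the SRQ limit handling, and the QP state check, the dequeue at the heart of both functions is a ring-buffer pop whose consumer index must be re-validated before use because it lives in user-writable memory. A toy user-space model of just that pattern, keeping the same 0/1 empty/dequeued convention; every name here is illustrative, none comes from either driver:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the receive work queue: head is the producer index,
 * tail the consumer index, wr_id[] stands in for the RWQE ring. */
struct toy_rwq {
        uint32_t head;
        uint32_t tail;
        uint32_t size;
        uint64_t wr_id[8];
};

static int toy_get_rwqe(struct toy_rwq *wq, uint64_t *wr_id)
{
        uint32_t tail = wq->tail;

        if (tail >= wq->size)   /* validate untrusted, user-writable tail */
                tail = 0;
        if (tail == wq->head)   /* ring empty: nothing posted */
                return 0;
        *wr_id = wq->wr_id[tail];
        if (++tail >= wq->size) /* advance with wraparound */
                tail = 0;
        wq->tail = tail;
        return 1;
}

int main(void)
{
        struct toy_rwq wq = { .head = 2, .tail = 0, .size = 8,
                              .wr_id = { 11, 22 } };
        uint64_t id;

        while (toy_get_rwqe(&wq, &id))
                printf("dequeued wr_id %llu\n", (unsigned long long)id);
        return 0;
}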