Example #1
/*
 * init_qp_queues initializes/constructs the send and receive queues
 * and registers the queue pages.
 */
static inline int init_qp_queues(struct ehca_shca *shca,
                                 struct ehca_qp *my_qp,
                                 int nr_sq_pages,
                                 int nr_rq_pages,
                                 int swqe_size,
                                 int rwqe_size,
                                 int nr_send_sges, int nr_receive_sges)
{
    int ret, cnt, ipz_rc;
    void *vpage;
    u64 rpage, h_ret;
    struct ib_device *ib_dev = &shca->ib_device;
    struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

    ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
                            nr_sq_pages,
                            EHCA_PAGESIZE, swqe_size, nr_send_sges);
    if (!ipz_rc) {
        ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
                 ipz_rc);
        return -EBUSY;
    }

    ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
                            nr_rq_pages,
                            EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
    if (!ipz_rc) {
        ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
                 ipz_rc);
        ret = -EBUSY;
        goto init_qp_queues0;
    }
    /* register SQ pages */
    for (cnt = 0; cnt < nr_sq_pages; cnt++) {
        vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
        if (!vpage) {
            ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
                     "failed p_vpage= %p", vpage);
            ret = -EINVAL;
            goto init_qp_queues1;
        }
        rpage = virt_to_abs(vpage);

        h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
                                         my_qp->ipz_qp_handle,
                                         &my_qp->pf, 0, 0,
                                         rpage, 1,
                                         my_qp->galpas.kernel);
        if (h_ret < H_SUCCESS) {
            ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
                     " failed rc=%lx", h_ret);
            ret = ehca2ib_return_code(h_ret);
            goto init_qp_queues1;
        }
    }

    ipz_qeit_reset(&my_qp->ipz_squeue);

    /* register RQ pages */
    for (cnt = 0; cnt < nr_rq_pages; cnt++) {
        vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
        if (!vpage) {
            ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
                     "failed p_vpage = %p", vpage);
            ret = -EINVAL;
            goto init_qp_queues1;
        }

        rpage = virt_to_abs(vpage);

        h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
                                         my_qp->ipz_qp_handle,
                                         &my_qp->pf, 0, 1,
                                         rpage, 1, my_qp->galpas.kernel);
        if (h_ret < H_SUCCESS) {
            ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
                     "rc=%lx", h_ret);
            ret = ehca2ib_return_code(h_ret);
            goto init_qp_queues1;
        }
        if (cnt == (nr_rq_pages - 1)) {	/* last page! */
            if (h_ret != H_SUCCESS) {
                ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
                         "h_ret= %lx ", h_ret);
                ret = ehca2ib_return_code(h_ret);
                goto init_qp_queues1;
            }
            vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
            if (vpage) {
                ehca_err(ib_dev, "ipz_qpageit_get_inc() "
                         "should not succeed vpage=%p", vpage);
                ret = -EINVAL;
                goto init_qp_queues1;
            }
        } else {
            if (h_ret != H_PAGE_REGISTERED) {
                ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
                         "h_ret= %lx ", h_ret);
                ret = ehca2ib_return_code(h_ret);
                goto init_qp_queues1;
            }
        }
    }

    ipz_qeit_reset(&my_qp->ipz_rqueue);

    return 0;

init_qp_queues1:
    ipz_queue_dtor(&my_qp->ipz_rqueue);
init_qp_queues0:
    ipz_queue_dtor(&my_qp->ipz_squeue);
    return ret;
}
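
The function above relies on the kernel's staged-cleanup idiom: each
constructed resource gets its own exit label, and a failure jumps to the
label that tears down exactly what was already built (init_qp_queues1
falls through to init_qp_queues0, so both queues are destroyed; a failure
before the rqueue exists jumps straight to init_qp_queues0). A minimal
user-space sketch of the same unwind shape; every name in it is
illustrative, not part of the eHCA driver:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-stage init mirroring init_qp_queues' unwind order. */
static int init_two_queues(void **sq, void **rq, size_t size)
{
    int ret;

    *sq = malloc(size);
    if (!*sq)
        return -1;          /* nothing built yet: plain return */

    *rq = malloc(size);
    if (!*rq) {
        ret = -1;
        goto undo_sq;       /* only the send queue exists so far */
    }

    return 0;

undo_sq:
    free(*sq);
    *sq = NULL;
    return ret;
}

int main(void)
{
    void *sq, *rq;

    if (init_two_queues(&sq, &rq, 64) == 0) {
        puts("both queues constructed");
        free(rq);
        free(sq);
    }
    return 0;
}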
Example #2
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *init_attr,
                             struct ib_udata *udata)
{
    static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
    static int da_ud_sq_msg_size[] = { 128, 384, 896, 1920, 3968 };
    struct ehca_qp *my_qp;
    struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
    struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
                                          ib_device);
    struct ib_ucontext *context = NULL;
    u64 h_ret;
    int max_send_sge, max_recv_sge, ret;

    /* h_call's out parameters */
    struct ehca_alloc_qp_parms parms;
    u32 swqe_size = 0, rwqe_size = 0;
    u8 daqp_completion, isdaqp;
    unsigned long flags;

    if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
            init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
        ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
                 init_attr->sq_sig_type);
        return ERR_PTR(-EINVAL);
    }

    /* save daqp completion bits */
    daqp_completion = init_attr->qp_type & 0x60;
    /* save daqp bit */
    isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
    init_attr->qp_type = init_attr->qp_type & 0x1F;

    if (init_attr->qp_type != IB_QPT_UD &&
            init_attr->qp_type != IB_QPT_SMI &&
            init_attr->qp_type != IB_QPT_GSI &&
            init_attr->qp_type != IB_QPT_UC &&
            init_attr->qp_type != IB_QPT_RC) {
        ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    }
    if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
            && isdaqp) {
        ehca_err(pd->device, "unsupported LL QP Type=%x",
                 init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
               (init_attr->cap.max_send_wr > 255 ||
                init_attr->cap.max_recv_wr > 255)) {
        ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
                 "or max_rq_wr=%x for QP Type=%x",
                 init_attr->cap.max_send_wr,
                 init_attr->cap.max_recv_wr,init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
               init_attr->cap.max_send_wr > 255) {
        ehca_err(pd->device,
                 "Invalid number of max_send_wr=%x for UD QP Type=%x",
                 init_attr->cap.max_send_wr, init_attr->qp_type);
        return ERR_PTR(-EINVAL);
    }

    if (pd->uobject && udata)
        context = pd->uobject->context;

    my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
    if (!my_qp) {
        ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
        return ERR_PTR(-ENOMEM);
    }

    memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
    spin_lock_init(&my_qp->spinlock_s);
    spin_lock_init(&my_qp->spinlock_r);

    my_qp->recv_cq =
        container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
    my_qp->send_cq =
        container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

    my_qp->init_attr = *init_attr;

    do {
        if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
            ret = -ENOMEM;
            ehca_err(pd->device, "Can't reserve idr resources.");
            goto create_qp_exit0;
        }

        spin_lock_irqsave(&ehca_qp_idr_lock, flags);
        ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
        spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

    } while (ret == -EAGAIN);

    if (ret) {
        ret = -ENOMEM;
        ehca_err(pd->device, "Can't allocate new idr entry.");
        goto create_qp_exit0;
    }

    parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
    if (parms.servicetype < 0) {
        ret = -EINVAL;
        ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
        goto create_qp_exit0;
    }

    if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
        parms.sigtype = HCALL_SIGT_EVERY;
    else
        parms.sigtype = HCALL_SIGT_BY_WQE;

    /* UD_AV CIRCUMVENTION */
    max_send_sge = init_attr->cap.max_send_sge;
    max_recv_sge = init_attr->cap.max_recv_sge;
    if (IB_QPT_UD == init_attr->qp_type ||
            IB_QPT_GSI == init_attr->qp_type ||
            IB_QPT_SMI == init_attr->qp_type) {
        max_send_sge += 2;
        max_recv_sge += 2;
    }

    parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
    parms.daqp_ctrl = isdaqp | daqp_completion;
    parms.pd = my_pd->fw_pd;
    parms.max_recv_sge = max_recv_sge;
    parms.max_send_sge = max_send_sge;

    h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);

    if (h_ret != H_SUCCESS) {
        ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
                 h_ret);
        ret = ehca2ib_return_code(h_ret);
        goto create_qp_exit1;
    }

    switch (init_attr->qp_type) {
    case IB_QPT_RC:
        if (isdaqp == 0) {
            swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
                                     (parms.act_nr_send_sges)]);
            rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
                                     (parms.act_nr_recv_sges)]);
        } else { /* for daqp we need to use msg size, not wqe size */
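
The listing above is cut off mid-switch in the source; up to that point,
the most reusable piece is the token allocation: idr_pre_get() preloads
memory outside the lock, idr_get_new() inserts under the spinlock, and
-EAGAIN restarts the pair because a concurrent allocation may have
consumed the preloaded memory. A rough user-space analogue of that retry
loop, with the idr calls stubbed out (only the control flow is the point):

#include <stdio.h>

/* Stub standing in for idr_get_new(): pretend a concurrent allocator
 * consumed the preloaded memory on the first two attempts. */
static int fake_idr_get_new(int *token)
{
    static int attempts;

    if (attempts++ < 2)
        return -11;             /* -EAGAIN: preload consumed, retry */
    *token = 42;
    return 0;
}

int main(void)
{
    int ret, token = -1;

    do {
        /* idr_pre_get() would preload memory here, outside the lock */
        ret = fake_idr_get_new(&token);
    } while (ret == -11);       /* retry on -EAGAIN, as ehca_create_qp does */

    if (ret)
        fprintf(stderr, "token allocation failed: %d\n", ret);
    else
        printf("token=%d\n", token);
    return ret;
}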
Example #3
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	int i, ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_hca *rblock;

	static const u32 cap_mapping[] = {
		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
	};

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto query_device1;
	}

	memset(props, 0, sizeof(struct ib_device_attr));
	props->page_size_cap   = shca->hca_cap_mr_pgsize;
	props->fw_ver          = rblock->hw_ver;
	props->max_mr_size     = rblock->max_mr_size;
	props->vendor_id       = rblock->vendor_id >> 8;
	props->vendor_part_id  = rblock->vendor_part_id >> 16;
	props->hw_ver          = rblock->hw_ver;
	props->max_qp          = limit_uint(rblock->max_qp);
	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
	props->max_sge         = limit_uint(rblock->max_sge);
	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
	props->max_cq          = limit_uint(rblock->max_cq);
	props->max_cqe         = limit_uint(rblock->max_cqe);
	props->max_mr          = limit_uint(rblock->max_mr);
	props->max_mw          = limit_uint(rblock->max_mw);
	props->max_pd          = limit_uint(rblock->max_pd);
	props->max_ah          = limit_uint(rblock->max_ah);
	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
	props->max_rdd         = limit_uint(rblock->max_rd_domain);
	props->max_fmr         = limit_uint(rblock->max_mr);
	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		props->max_srq         = limit_uint(props->max_qp);
		props->max_srq_wr      = limit_uint(props->max_qp_wr);
		props->max_srq_sge     = 3;
	}

	props->max_pkeys           = 16;
	/* Some FW versions say 0 here; insert sensible value in that case */
	props->local_ca_ack_delay  = rblock->local_ca_ack_delay ?
		min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
	props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
	props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
	props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
	props->max_total_mcast_qp_attach
		= limit_uint(rblock->max_total_mcast_qp_attach);

	/* translate device capabilities */
	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
			props->device_cap_flags |= cap_mapping[i];

query_device1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
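
cap_mapping packs (IB flag, HCA capability bit) pairs into one flat array
and walks it two entries at a time, which keeps the translation table and
the loop trivially in sync. The same table-driven mapping reduced to a
self-contained sketch; the flag values below are invented for the demo,
not the real IB_DEVICE_ or HCA_CAP_ constants:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Invented flag values; not the real IB or HCA capability constants. */
enum { IB_FLAG_A = 1 << 0, IB_FLAG_B = 1 << 1 };
enum { HW_CAP_A = 1 << 4, HW_CAP_B = 1 << 5 };

static const unsigned int cap_mapping[] = {
    IB_FLAG_A, HW_CAP_A,        /* even index: IB flag, odd: HW bit */
    IB_FLAG_B, HW_CAP_B,
};

int main(void)
{
    unsigned int hw_caps = HW_CAP_B;    /* what the HCA reported */
    unsigned int ib_flags = 0;
    size_t i;

    for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
        if (hw_caps & cap_mapping[i + 1])
            ib_flags |= cap_mapping[i];

    printf("ib_flags=0x%x\n", ib_flags); /* prints 0x2 (IB_FLAG_B) */
    return 0;
}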
Example #4
int ehca_query_port(struct ib_device *ibdev,
		    u8 port, struct ib_port_attr *props)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_port1;
	}

	memset(props, 0, sizeof(struct ib_port_attr));

	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
	props->port_cap_flags  = rblock->capability_mask;
	props->gid_tbl_len     = rblock->gid_tbl_len;
	if (rblock->max_msg_sz)
		props->max_msg_sz      = rblock->max_msg_sz;
	else
		props->max_msg_sz      = 0x1U << 31;
	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
	props->pkey_tbl_len    = rblock->pkey_tbl_len;
	props->lid             = rblock->lid;
	props->sm_lid          = rblock->sm_lid;
	props->lmc             = rblock->lmc;
	props->sm_sl           = rblock->sm_sl;
	props->subnet_timeout  = rblock->subnet_timeout;
	props->init_type_reply = rblock->init_type_reply;
	props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);

	if (rblock->state && rblock->phys_width) {
		props->phys_state      = rblock->phys_pstate;
		props->state           = rblock->phys_state;
		props->active_width    = rblock->phys_width;
		props->active_speed    = rblock->phys_speed;
	} else {
		/* old firmware releases don't report physical
		 * port info, so use default values
		 */
		props->phys_state      = 5;
		props->state           = rblock->state;
		props->active_width    = IB_WIDTH_12X;
		props->active_speed    = 0x1;
	}

query_port1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
Example #5
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			/* only replay modify_qp calls in autodetect mode;
			 * if AQP1 was destroyed, the port is already down
			 * again and we can drop the event.
			 */
			if (ehca_nr_ports < 0)
				if (replay_modify_qp(sport))
					break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port, &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Traced stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}
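
parse_ec pulls the event code and port number out of the 64-bit NEQE with
EHCA_BMASK_GET. A plausible reconstruction of that kind of shift-and-mask
field accessor; the encoding and the field positions below are assumptions
for the demo, not the driver's actual EHCA_BMASK layout:

#include <stdint.h>
#include <stdio.h>

/* One way to encode a field as (low bit, width) and extract it by
 * shift-and-mask. The real EHCA_BMASK macros encode fields differently;
 * only the technique is the same. */
#define BMASK(low, width)   (((uint64_t)(low) << 8) | (width))
#define BMASK_GET(f, v) \
    (((v) >> ((f) >> 8)) & ((1ULL << ((f) & 0xff)) - 1))

/* Invented positions, not the real NEQE layout. */
#define DEMO_EVENT_CODE     BMASK(56, 8)
#define DEMO_PORT_NUMBER    BMASK(48, 8)

int main(void)
{
    uint64_t eqe = ((uint64_t)0x30 << 56) | ((uint64_t)2 << 48);

    printf("ec=0x%x port=%u\n",
           (unsigned)BMASK_GET(DEMO_EVENT_CODE, eqe),
           (unsigned)BMASK_GET(DEMO_PORT_NUMBER, eqe));
    return 0;
}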
Example #6
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value, ret;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	}
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}
	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
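
ehca_process_eq works in two phases: it first drains up to
EHCA_EQE_CACHE_SIZE entries into eqe_cache (taking the CQ references it
needs under the idr read lock), and only then runs the handlers over the
cached batch. A stripped-down sketch of that collect-then-dispatch shape;
the event source and handler are stand-ins, and here the cache phase
simply repeats until the queue runs dry:

#include <stdio.h>

#define CACHE_SIZE 4

/* Stand-in event source: yields six events, then runs dry. */
static int poll_event(void)
{
    static int remaining = 6;
    return remaining > 0 ? remaining-- : 0;
}

static void handle_event(int ev)
{
    printf("handling event %d\n", ev);
}

int main(void)
{
    int cache[CACHE_SIZE];
    int cnt, i;

    do {
        /* phase 1: drain into the cache (the driver holds its locks
         * and takes CQ references here) */
        for (cnt = 0; cnt < CACHE_SIZE; cnt++) {
            cache[cnt] = poll_event();
            if (!cache[cnt])
                break;
        }
        /* phase 2: dispatch the cached batch */
        for (i = 0; i < cnt; i++)
            handle_event(cache[i]);
    } while (cnt == CACHE_SIZE);  /* full cache: the queue may hold more */

    return 0;
}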
Example #7
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02: /* path migrated */
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03: /* communication established */
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04: /* send queue drained */
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05: /* QP error */
	case 0x06: /* QP error */
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07: /* CQ error */
	case 0x08: /* CQ error */
		cq_event_callback(shca, eqe);
		break;
	case 0x09: /* MRMWPTE error */
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A: /* port event */
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B: /* MR access error */
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C: /* EQ error */
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D: /* P/Q_Key mismatch */
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10: /* sampling complete */
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11: /* unaffiliated access error */
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12: /* path migrating */
		ehca_err(&shca->ib_device, "Path migrating.");
		break;
	case 0x13: /* interface trace stopped */
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14: /* first error capture info available */
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15: /* SRQ limit reached */
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}

	return;
}
Example #8
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];
	unsigned long flags;

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			int suppress_event;
			/* replay modify_qp for sqps */
			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
			if (sport->ibqp_sqp[IB_QPT_SMI])
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
			if (!suppress_event)
				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);

			/* AQP1 was destroyed, ignore this event */
			if (suppress_event)
				break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:  /* trace stopped */
		ehca_err(&shca->ib_device, "Traced stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}
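
Compared with the version in Example #5, this revision replays the
modify_qp calls itself and makes the suppress decision atomic: the check
of ibqp_sqp[IB_QPT_GSI] and the recovery both happen under
sport->mod_sqp_lock, so AQP1 cannot be destroyed between the test and the
use. A toy pthread sketch of that check-then-act-under-one-lock pattern;
all names are stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sqp_lock = PTHREAD_MUTEX_INITIALIZER;
static void *gsi_qp;            /* NULL once AQP1 has been destroyed */

/* Decide and act under one lock, so the QP cannot vanish between the
 * NULL check and its use. */
static int recover_or_suppress(void)
{
    int suppress;

    pthread_mutex_lock(&sqp_lock);
    suppress = (gsi_qp == NULL);
    if (!suppress) {
        /* ehca_recover_sqp(gsi_qp) would run here */
    }
    pthread_mutex_unlock(&sqp_lock);
    return suppress;
}

int main(void)
{
    printf("suppress=%d\n", recover_or_suppress());
    return 0;
}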
Example #9
int ehca_create_eq(struct ehca_shca *shca,
		   struct ehca_eq *eq,
		   const enum ehca_eq_type type, const u32 length)
{
	int ret;
	u64 h_ret;
	u32 nr_pages;
	u32 i;
	void *vpage;
	struct ib_device *ib_dev = &shca->ib_device;

	spin_lock_init(&eq->spinlock);
	spin_lock_init(&eq->irq_spinlock);
	eq->is_initialized = 0;

	if (type != EHCA_EQ && type != EHCA_NEQ) {
		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
		return -EINVAL;
	}
	if (!length) {
		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
		return -EINVAL;
	}

	h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
					 &eq->pf,
					 type,
					 length,
					 &eq->ipz_eq_handle,
					 &eq->length,
					 &nr_pages, &eq->ist);

	if (h_ret != H_SUCCESS) {
		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
		return -EINVAL;
	}

	ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
	if (!ret) {
		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
		goto create_eq_exit1;
	}

	for (i = 0; i < nr_pages; i++) {
		u64 rpage;

		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
		if (!vpage)
			goto create_eq_exit2;

		rpage = virt_to_abs(vpage);
		h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
						 eq->ipz_eq_handle,
						 &eq->pf,
						 0, 0, rpage, 1);

		if (i == (nr_pages - 1)) {
			/* last page */
			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
			if (h_ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
		} else {
			if (h_ret != H_PAGE_REGISTERED)
				goto create_eq_exit2;
		}
	}

	ipz_qeit_reset(&eq->ipz_queue);

	/* register interrupt handlers and initialize work queues */
	if (type == EHCA_EQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
					  IRQF_DISABLED, "ehca_eq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
	} else if (type == EHCA_NEQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
					  IRQF_DISABLED, "ehca_neq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
	}

	eq->is_initialized = 1;

	return 0;

create_eq_exit2:
	ipz_queue_dtor(NULL, &eq->ipz_queue);

create_eq_exit1:
	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	return -EINVAL;
}
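
The registration loop in ehca_create_eq expects a different hypervisor
status on the last iteration: every page but the last must return
H_PAGE_REGISTERED, the last must return H_SUCCESS, and afterwards the page
iterator must be exhausted. The same last-iteration check in isolation,
with the hypervisor call stubbed (H_PAGE_REGISTERED's value here is a
stand-in, not the real hcall constant):

#include <stdio.h>

enum { H_SUCCESS = 0, H_PAGE_REGISTERED = 1 };  /* stand-in values */

/* Stub hypervisor call: PAGE_REGISTERED until the final page, then
 * SUCCESS, like a well-behaved hipz_h_register_rpage_eq(). */
static int register_page(int idx, int nr_pages)
{
    return (idx == nr_pages - 1) ? H_SUCCESS : H_PAGE_REGISTERED;
}

int main(void)
{
    int nr_pages = 3, i, rc;

    for (i = 0; i < nr_pages; i++) {
        rc = register_page(i, nr_pages);
        if (i == nr_pages - 1) {
            if (rc != H_SUCCESS) {      /* last page must complete */
                fprintf(stderr, "bad final status %d\n", rc);
                return 1;
            }
        } else if (rc != H_PAGE_REGISTERED) {
            fprintf(stderr, "page %d not registered\n", i);
            return 1;
        }
    }
    puts("all pages registered");
    return 0;
}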
Example #10
File: ehca_hca.c Project: cilynx/dd-wrt
int ehca_query_port(struct ib_device *ibdev,
		    u8 port, struct ib_port_attr *props)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_port1;
	}

	memset(props, 0, sizeof(struct ib_port_attr));
	props->state = rblock->state;

	switch (rblock->max_mtu) {
	case 0x1:
		props->active_mtu = props->max_mtu = IB_MTU_256;
		break;
	case 0x2:
		props->active_mtu = props->max_mtu = IB_MTU_512;
		break;
	case 0x3:
		props->active_mtu = props->max_mtu = IB_MTU_1024;
		break;
	case 0x4:
		props->active_mtu = props->max_mtu = IB_MTU_2048;
		break;
	case 0x5:
		props->active_mtu = props->max_mtu = IB_MTU_4096;
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
			 rblock->max_mtu);
		break;
	}

	props->port_cap_flags  = rblock->capability_mask;
	props->gid_tbl_len     = rblock->gid_tbl_len;
	props->max_msg_sz      = rblock->max_msg_sz;
	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
	props->pkey_tbl_len    = rblock->pkey_tbl_len;
	props->lid             = rblock->lid;
	props->sm_lid          = rblock->sm_lid;
	props->lmc             = rblock->lmc;
	props->sm_sl           = rblock->sm_sl;
	props->subnet_timeout  = rblock->subnet_timeout;
	props->init_type_reply = rblock->init_type_reply;

	props->active_width    = IB_WIDTH_12X;
	props->active_speed    = 0x1;

	/* at the moment (logical) link state is always LINK_UP */
	props->phys_state      = 0x5;

query_port1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
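
The MTU switch above maps firmware codes 0x1..0x5 straight onto the
ib_mtu enumerators; since the codes are dense and coincide with the enum
values, a bounds check can express the same mapping. A hedged sketch; the
IB_MTU_* values below mirror enum ib_mtu from <rdma/ib_verbs.h>:

#include <stdio.h>

/* Mirrors enum ib_mtu from <rdma/ib_verbs.h>. */
enum ib_mtu {
    IB_MTU_256  = 1,
    IB_MTU_512  = 2,
    IB_MTU_1024 = 3,
    IB_MTU_2048 = 4,
    IB_MTU_4096 = 5,
};

/* Firmware codes 0x1..0x5 coincide with enum ib_mtu, so a bounds check
 * plus a cast can replace the five-armed switch. */
static int map_mtu_code(unsigned int code, enum ib_mtu *mtu)
{
    if (code < 0x1 || code > 0x5)
        return -1;              /* unknown code: caller logs an error */
    *mtu = (enum ib_mtu)code;
    return 0;
}

int main(void)
{
    enum ib_mtu mtu;

    if (map_mtu_code(0x4, &mtu) == 0)
        printf("mtu enum %d (2048 bytes)\n", mtu);
    return 0;
}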
Example #11
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
    struct ib_event event;
    u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
    u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

    switch (ec) {
    case 0x30: /* port availability change */
        if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
            ehca_info(&shca->ib_device,
                      "port %x is active.", port);
            event.device = &shca->ib_device;
            event.event = IB_EVENT_PORT_ACTIVE;
            event.element.port_num = port;
            shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
            ib_dispatch_event(&event);
        } else {
            ehca_info(&shca->ib_device,
                      "port %x is inactive.", port);
            event.device = &shca->ib_device;
            event.event = IB_EVENT_PORT_ERR;
            event.element.port_num = port;
            shca->sport[port - 1].port_state = IB_PORT_DOWN;
            ib_dispatch_event(&event);
        }
        break;
    case 0x31:
        /* port configuration change
         * disruptive change is caused by
         * LID, PKEY or SM change
         */
        ehca_warn(&shca->ib_device,
                  "disruptive port %x configuration change", port);

        ehca_info(&shca->ib_device,
                  "port %x is inactive.", port);
        event.device = &shca->ib_device;
        event.event = IB_EVENT_PORT_ERR;
        event.element.port_num = port;
        shca->sport[port - 1].port_state = IB_PORT_DOWN;
        ib_dispatch_event(&event);

        ehca_info(&shca->ib_device,
                  "port %x is active.", port);
        event.device = &shca->ib_device;
        event.event = IB_EVENT_PORT_ACTIVE;
        event.element.port_num = port;
        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
        ib_dispatch_event(&event);
        break;
    case 0x32: /* adapter malfunction */
        ehca_err(&shca->ib_device, "Adapter malfunction.");
        break;
    case 0x33:  /* trace stopped */
        ehca_err(&shca->ib_device, "Traced stopped.");
        break;
    default:
        ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
                 ec, shca->ib_device.name);
        break;
    }

    return;
}