Code Example #1
File: px_pec.c  Project: andreiw/polaris
/*
 * px_pec_msg_rem_intr:
 *
 * Remove the interrupt handlers that process correctable/fatal/non-fatal
 * PCIe messages. For now, all of these PCIe messages are mapped to the
 * same MSIQ.
 */
static void
px_pec_msg_rem_intr(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_pec_t		*pec_p = px_p->px_pec_p;
	ddi_intr_handle_impl_t	hdl;

	DBG(DBG_MSG, px_p->px_dip, "px_pec_msg_rem_intr: dip 0x%p\n", dip);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;

	if (pec_p->pec_corr_msg_msiq_id >= 0) {
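		/* Remove correctable error message handler */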
		px_lib_msg_setvalid(dip, PCIE_CORR_MSG, PCIE_MSG_INVALID);

		(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC,
		    PCIE_CORR_MSG, pec_p->pec_corr_msg_msiq_id);

		(void) px_ib_update_intr_state(px_p, px_p->px_dip,
		    hdl.ih_inum, px_msiqid_to_devino(px_p,
		    pec_p->pec_corr_msg_msiq_id),
		    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_CORR_MSG);

		pec_p->pec_corr_msg_msiq_id = -1;
	}

	if (pec_p->pec_non_fatal_msg_msiq_id >= 0) {
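		/* Remove non-fatal error message handler */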
		px_lib_msg_setvalid(dip, PCIE_NONFATAL_MSG,
		    PCIE_MSG_INVALID);

		(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC,
		    PCIE_NONFATAL_MSG, pec_p->pec_non_fatal_msg_msiq_id);

		(void) px_ib_update_intr_state(px_p, px_p->px_dip,
		    hdl.ih_inum, px_msiqid_to_devino(px_p,
		    pec_p->pec_non_fatal_msg_msiq_id),
		    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_NONFATAL_MSG);

		pec_p->pec_non_fatal_msg_msiq_id = -1;
	}

	if (pec_p->pec_fatal_msg_msiq_id >= 0) {
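		/* Remove fatal error message handler */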
		px_lib_msg_setvalid(dip, PCIE_FATAL_MSG, PCIE_MSG_INVALID);

		(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC,
		    PCIE_FATAL_MSG, pec_p->pec_fatal_msg_msiq_id);

		(void) px_ib_update_intr_state(px_p, px_p->px_dip,
		    hdl.ih_inum, px_msiqid_to_devino(px_p,
		    pec_p->pec_fatal_msg_msiq_id),
		    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_FATAL_MSG);

		pec_p->pec_fatal_msg_msiq_id = -1;
	}
}
Code Example #2
File: px.c  Project: apprisi/illumos-gate
/*
 * Undo whatever is done in px_pwr_setup(). Called by px_detach().
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t	hdl;

	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

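	/* Invalidate and remove the PME_TO_ACK message interrupt */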
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = (msiqid_t)-1;

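	/* Destroy the L23-ready synchronization objects created in px_pwr_setup() */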
	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}
Code Example #3
File: px.c  Project: apprisi/illumos-gate
/*
 * Power management related initialization specific to px.
 * Called by px_attach().
 */
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t *pwr_p;
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * Indicate support for LDI (Layered Driver Interface).
	 * Create the property if it is not already there.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}
	/* No support for device PM. We are always at full power */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, -1,
	    &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    "PME_TO_ACK intr\n");
		goto pwr_setup_err1;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
pwr_setup_err1:
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}
Code Example #4
File: px_ib.c  Project: mikess/illumos-gate
/*
 * Associate a new CPU with a given MSI/X.
 * Operate only on MSI/Xs which are already mapped to devices.
 */
int
px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp,
                      msinum_t msi_num, cpuid_t cpu_id)
{
    px_ib_t			*ib_p = px_p->px_ib_p;
    px_msi_state_t		*msi_state_p = &px_p->px_ib_p->ib_msi_state;
    dev_info_t		*dip = px_p->px_dip;
    dev_info_t		*rdip = hdlp->ih_dip;
    msiqid_t		msiq_id, old_msiq_id;
    pci_msi_state_t		msi_state;
    msiq_rec_type_t		msiq_rec_type;
    msi_type_t		msi_type;
    px_ino_t		*ino_p;
    px_ih_t			*ih_p, *old_ih_p;
    cpuid_t			old_cpu_id;
    hrtime_t		start_time, end_time;
    int			ret = DDI_SUCCESS;
    extern const int	_ncpu;
    extern cpu_t		*cpu[];

    DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n",
        msi_num, cpu_id);

    mutex_enter(&cpu_lock);

    /* Check for MSI64 support */
    if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
        msiq_rec_type = MSI64_REC;
        msi_type = MSI64_TYPE;
    } else {
        msiq_rec_type = MSI32_REC;
        msi_type = MSI32_TYPE;
    }

    if ((ret = px_lib_msi_getmsiq(dip, msi_num,
                                  &old_msiq_id)) != DDI_SUCCESS) {

        mutex_exit(&cpu_lock);
        return (ret);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n",
        old_msiq_id);

    if ((ret = px_ib_get_intr_target(px_p,
                                     px_msiqid_to_devino(px_p, old_msiq_id),
                                     &old_cpu_id)) != DDI_SUCCESS) {

        mutex_exit(&cpu_lock);
        return (ret);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n",
        old_cpu_id);

    if (cpu_id == old_cpu_id) {

        mutex_exit(&cpu_lock);
        return (DDI_SUCCESS);
    }

    /*
     * Validate the target CPU before retargeting (cpu_lock is already held).
     */
    if (!((cpu_id < _ncpu) && (cpu[cpu_id] &&
                               cpu_is_online(cpu[cpu_id])))) {
        /* Invalid cpu */
        DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n",
            cpu_id);

        mutex_exit(&cpu_lock);
        return (DDI_EINVAL);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id);

    if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
                                msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) {
        DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler "
            "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);

        mutex_exit(&cpu_lock);
        return (ret);
    }

    if ((ret = px_lib_msi_setmsiq(dip, msi_num,
                                  msiq_id, msi_type)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);

        (void) px_rem_msiq_intr(dip, rdip,
                                hdlp, msiq_rec_type, msi_num, msiq_id);

        return (ret);
    }

    if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
                                       px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
                                       PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);

        (void) px_rem_msiq_intr(dip, rdip,
                                hdlp, msiq_rec_type, msi_num, msiq_id);

        return (ret);
    }

    mutex_exit(&cpu_lock);

    /*
     * Remove the old handler, but first ensure it is finished.
     *
     * Each handler sets its PENDING flag before it clears the MSI state.
     * Then it clears that flag when finished.  If a re-target occurs while
     * the MSI state is DELIVERED, then it is not yet known which of the
     * two handlers will take the interrupt.  So the re-target operation
     * sets a RETARGET flag on both handlers in that case.  Monitoring both
     * flags on both handlers then determines when the old handler can
     * be safely removed.
     */
    mutex_enter(&ib_p->ib_ino_lst_mutex);

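    /* Locate the handler entries for both the old and the new MSIQ */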
    ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id));
    old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p,
                                    hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

    ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id));
    ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri),
                                rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

    if ((ret = px_lib_msi_getstate(dip, msi_num,
                                   &msi_state)) != DDI_SUCCESS) {
        (void) px_rem_msiq_intr(dip, rdip,
                                hdlp, msiq_rec_type, msi_num, msiq_id);

        mutex_exit(&ib_p->ib_ino_lst_mutex);
        return (ret);
    }

    if (msi_state == PCI_MSI_STATE_DELIVERED) {
        ih_p->ih_intr_flags |= PX_INTR_RETARGET;
        old_ih_p->ih_intr_flags |= PX_INTR_RETARGET;
    }

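    /* Poll until the old handler can be safely removed or the timeout expires */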
    start_time = gethrtime();
    while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) &&
            (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) ||
            (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) {

        /* Wait for one second */
        delay(drv_usectohz(1000000));

        end_time = gethrtime() - start_time;
        if (end_time > px_ib_msix_retarget_timeout) {
            cmn_err(CE_WARN, "MSIX retarget %x did not complete, "
                    "even after waiting %llx nsec\n",
                    msi_num, end_time);
            break;
        }
    }

    ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET);

    mutex_exit(&ib_p->ib_ino_lst_mutex);

    ret = px_rem_msiq_intr(dip, rdip,
                           hdlp, msiq_rec_type, msi_num, old_msiq_id);

    return (ret);
}
Code Example #5
File: px_pec.c  Project: andreiw/polaris
/*
 * px_pec_msg_add_intr:
 *
 * Add interrupt handlers to process correctable/fatal/non-fatal
 * PCIe messages.
 */
static int
px_pec_msg_add_intr(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_pec_t		*pec_p = px_p->px_pec_p;
	ddi_intr_handle_impl_t	hdl;
	int			ret = DDI_SUCCESS;

	DBG(DBG_MSG, px_p->px_dip, "px_pec_msg_add_intr\n");

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_err_fabric_intr;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;

	/* Add correctable error message handler */
	hdl.ih_pri = PX_ERR_LOW_PIL;

	if ((ret = px_add_msiq_intr(dip, dip, &hdl,
	    MSG_REC, (msgcode_t)PCIE_CORR_MSG,
	    &pec_p->pec_corr_msg_msiq_id)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_CORR_MSG registration failed\n");
		return (DDI_FAILURE);
	}

	px_lib_msg_setmsiq(dip, PCIE_CORR_MSG, pec_p->pec_corr_msg_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_CORR_MSG, PCIE_MSG_VALID);

	if ((ret = px_ib_update_intr_state(px_p, px_p->px_dip,
	    hdl.ih_inum, px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id),
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_CORR_MSG)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_CORR_MSG update interrupt state failed\n");
		return (DDI_FAILURE);
	}

	/* Add non-fatal error message handler */
	hdl.ih_pri = PX_ERR_PIL;

	if ((ret = px_add_msiq_intr(dip, dip, &hdl,
	    MSG_REC, (msgcode_t)PCIE_NONFATAL_MSG,
	    &pec_p->pec_non_fatal_msg_msiq_id)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_NONFATAL_MSG registration failed\n");
		return (DDI_FAILURE);
	}

	px_lib_msg_setmsiq(dip, PCIE_NONFATAL_MSG,
	    pec_p->pec_non_fatal_msg_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_NONFATAL_MSG, PCIE_MSG_VALID);

	if ((ret = px_ib_update_intr_state(px_p, px_p->px_dip,
	    hdl.ih_inum, px_msiqid_to_devino(px_p,
	    pec_p->pec_non_fatal_msg_msiq_id), PX_INTR_STATE_ENABLE, MSG_REC,
	    PCIE_NONFATAL_MSG)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_NONFATAL_MSG update interrupt state failed\n");
		return (DDI_FAILURE);
	}

	/* Add fatal error message handler */
	hdl.ih_pri = PX_ERR_PIL;

	if ((ret = px_add_msiq_intr(dip, dip, &hdl,
	    MSG_REC, (msgcode_t)PCIE_FATAL_MSG,
	    &pec_p->pec_fatal_msg_msiq_id)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_FATAL_MSG registration failed\n");
		return (DDI_FAILURE);
	}

	px_lib_msg_setmsiq(dip, PCIE_FATAL_MSG, pec_p->pec_fatal_msg_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_FATAL_MSG, PCIE_MSG_VALID);

	if ((ret = px_ib_update_intr_state(px_p, px_p->px_dip,
	    hdl.ih_inum, px_msiqid_to_devino(px_p,
	    pec_p->pec_fatal_msg_msiq_id), PX_INTR_STATE_ENABLE, MSG_REC,
	    PCIE_FATAL_MSG)) != DDI_SUCCESS) {
		DBG(DBG_MSG, px_p->px_dip,
		    "PCIE_FATAL_MSG update interrupt state failed\n");
		return (DDI_FAILURE);
	}

	return (ret);
}