/*
 * Initialize FMA error-handling state for an access handle: install
 * the on_trap protection data and the impl_acc_check() compare
 * callback when the driver is access-error capable.
 */
void
impl_acc_err_init(ddi_acc_hdl_t *handlep)
{
	int fmcap;
	ndi_err_t *errp;
	on_trap_data_t *otp;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handlep;

	fmcap = ddi_fm_capable(handlep->ah_dip);

	if (handlep->ah_acc.devacc_attr_version < DDI_DEVICE_ATTR_V1 ||
	    !DDI_FM_ACC_ERR_CAP(fmcap)) {
		handlep->ah_acc.devacc_attr_access = DDI_DEFAULT_ACC;
	} else {	/* DDI_FM_ACC_ERR_CAP(fmcap) holds here */
		if (handlep->ah_acc.devacc_attr_access == DDI_DEFAULT_ACC) {
			i_ddi_drv_ereport_post(handlep->ah_dip, DVR_EFMCAP,
			    NULL, DDI_NOSLEEP);
		} else {
			errp = hp->ahi_err;
			otp = (on_trap_data_t *)errp->err_ontrap;
			otp->ot_handle = (void *)(hp);
			otp->ot_prot = OT_DATA_ACCESS;
			errp->err_status = DDI_FM_OK;
			errp->err_expected = DDI_FM_ERR_UNEXPECTED;
			errp->err_cf = impl_acc_check;
		}
	}
}
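
/*
 * The protected-access path above only engages when a driver negotiates
 * access-error capability and requests flagged access in a V1 attribute
 * structure. A minimal driver-side sketch: mydrv_regs_setup, register
 * number 1, and the elided error handling are illustrative assumptions,
 * not part of the source.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>

static int
mydrv_regs_setup(dev_info_t *dip, caddr_t *regsp, ddi_acc_handle_t *hdlp)
{
	ddi_device_acc_attr_t attr;
	ddi_iblock_cookie_t ibc;
	int fmcap = DDI_FM_ACCCHK_CAPABLE;	/* request access checking */

	ddi_fm_init(dip, &fmcap, &ibc);	/* fmcap now holds what was granted */

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V1;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_FM_ACC_ERR_CAP(fmcap) ?
	    DDI_FLAGERR_ACC : DDI_DEFAULT_ACC;

	/* impl_acc_err_init() runs as part of this setup */
	return (ddi_regs_map_setup(dip, 1, regsp, 0, 0, &attr, hdlp));
}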
Example #2
/*
 * ndi_fmc_insert -
 *	Add a new entry to the specified cache.
 *
 *	This function must be called at or below LOCK_LEVEL.
 */
void
ndi_fmc_insert(dev_info_t *dip, int flag, void *resource, void *bus_specific)
{
	struct dev_info *devi = DEVI(dip);
	ndi_fmc_t *fcp;
	ndi_fmcentry_t *fep, **fpp;
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(devi);
	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = devi->devi_fmhdl;
	if (fmhdl == NULL) {
		return;
	}

	if (flag == DMA_HANDLE) {
		if (!DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
			return;
		}
		fcp = fmhdl->fh_dma_cache;
		fpp = &((ddi_dma_impl_t *)resource)->dmai_error.err_fep;
	} else if (flag == ACC_HANDLE) {
		if (!DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
			i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL,
			    DDI_NOSLEEP);
			return;
		}
		fcp = fmhdl->fh_acc_cache;
		fpp = &((ddi_acc_impl_t *)resource)->ahi_err->err_fep;
	} else {
		return;
	}

	fep = kmem_cache_alloc(ndi_fm_entry_cache, KM_NOSLEEP);
	if (fep == NULL) {
		atomic_inc_64(&fmhdl->fh_kstat.fek_fmc_full.value.ui64);
		return;
	}

	/*
	 * Set up the handle resource and bus_specific information.
	 * Also remember the pointer back to the cache for quick removal.
	 */
	fep->fce_bus_specific = bus_specific;
	fep->fce_resource = resource;
	fep->fce_next = NULL;

	/* Add entry to the end of the active list */
	mutex_enter(&fcp->fc_lock);
	ASSERT(*fpp == NULL);
	*fpp = fep;
	fep->fce_prev = fcp->fc_tail;
	if (fcp->fc_tail != NULL)
		fcp->fc_tail->fce_next = fep;
	else
		fcp->fc_head = fep;
	fcp->fc_tail = fep;
	mutex_exit(&fcp->fc_lock);
}
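
/*
 * Hedged sketch: how a nexus might pair cache insertion with removal
 * over a handle's lifetime. my_dma_bound/my_dma_unbound are
 * hypothetical helpers, not DDI entry points; the real callers live in
 * the bus nexus bind/unbind paths.
 */
#include <sys/ndifm.h>

static void
my_dma_bound(dev_info_t *rdip, ddi_dma_handle_t h)
{
	/* track the bound handle so bus error handlers can match it */
	ndi_fmc_insert(rdip, DMA_HANDLE, (void *)h, NULL);
}

static void
my_dma_unbound(dev_info_t *rdip, ddi_dma_handle_t h)
{
	/* the err_fep back-pointer set by ndi_fmc_insert() makes this O(1) */
	ndi_fmc_remove(rdip, DMA_HANDLE, (void *)h);
}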
Example #3
/*
 * Modify register access attributes to match the FM capabilities
 * configured by the user and accepted by the driver.
 *
 * fm_caps - FM capabilities configured by the user and accepted by the driver
 */
void
oce_set_reg_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}
	if (DDI_FM_ACC_ERR_CAP(fm_caps)) {
		reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
	}
} /* oce_set_reg_fma_flags */
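
/*
 * Hedged sketch of the surrounding initialization: negotiate FM
 * capabilities first, then let the helper above pick the access
 * attribute. oce_fm_init_sketch is hypothetical; only
 * oce_set_reg_fma_flags() and reg_accattr come from the source.
 */
#include <sys/ddifm.h>

static void
oce_fm_init_sketch(dev_info_t *dip, int *fm_caps)
{
	ddi_iblock_cookie_t ibc;

	if (*fm_caps == DDI_FM_NOT_CAPABLE)
		return;

	ddi_fm_init(dip, fm_caps, &ibc);	/* *fm_caps := granted set */
	oce_set_reg_fma_flags(*fm_caps);	/* adjust reg_accattr */
}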
Example #4
int
pci_config_setup(dev_info_t *dip, ddi_acc_handle_t *handle)
{
	caddr_t	cfgaddr;
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* Check for fault management capabilities */
	if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(dip))) {
		attr.devacc_attr_version = DDI_DEVICE_ATTR_V1;
		attr.devacc_attr_access = DDI_FLAGERR_ACC;
	}

	return (ddi_regs_map_setup(dip, 0, &cfgaddr, 0, 0, &attr, handle));
}
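
/*
 * Hedged usage sketch for pci_config_setup(): read the vendor/device
 * IDs from config space and tear the handle down. mydrv_probe_ids is
 * hypothetical; pci_config_get16()/pci_config_teardown() and the
 * PCI_CONF_* offsets are standard DDI / <sys/pci.h> interfaces.
 */
#include <sys/pci.h>
#include <sys/sunddi.h>

static int
mydrv_probe_ids(dev_info_t *dip)
{
	ddi_acc_handle_t cfg;
	uint16_t vid, did;

	if (pci_config_setup(dip, &cfg) != DDI_SUCCESS)
		return (DDI_FAILURE);

	vid = pci_config_get16(cfg, PCI_CONF_VENID);
	did = pci_config_get16(cfg, PCI_CONF_DEVID);
	cmn_err(CE_CONT, "?vendor 0x%x device 0x%x\n", vid, did);

	pci_config_teardown(&cfg);
	return (DDI_SUCCESS);
}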
Example #5
/*
 * bus map entry point:
 *
 * 	if map request is for an rnumber
 *		get the corresponding regspec from device node
 * 	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * We may need to differentiate between PCI and PCI
		 * Express devices so that the following range check is
		 * done correctly, depending on the implementation of
		 * the pcieb bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE))
			return (DDI_ME_INVAL);
		/*
		 * If px_lib_map_vconfig() returns DDI_FAILURE, no
		 * virtual config space access service is defined in
		 * this layer; otherwise the service completes the
		 * mapping right here and we return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if ((rval = px_reloc_reg(dip, rdip, px_p, rp)) != 0)
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if ((rval = px_xlate_reg(px_p, rp, &p_regspec)) != 0)
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)))
			px_fm_acc_setup(mp, rdip, rp);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
Example #6
/*
 * Initialize the accessor functions of an access handle according to
 * its attributes: cautious accessors for DDI_FLAGERR_ACC and
 * DDI_CAUTIOUS_ACC, otherwise direct or byte-swapping I/O or virtual
 * address accessors chosen by space type and endianness.
 */
void
impl_acc_hdl_init(ddi_acc_hdl_t *handlep)
{
	ddi_acc_impl_t *hp;
	int fmcap;
	int devacc_attr_access;

	if (!handlep)
		return;
	fmcap = ddi_fm_capable(handlep->ah_dip);
	if (handlep->ah_acc.devacc_attr_version < DDI_DEVICE_ATTR_V1 ||
	    !DDI_FM_ACC_ERR_CAP(fmcap))
		devacc_attr_access = DDI_DEFAULT_ACC;
	else
		devacc_attr_access = handlep->ah_acc.devacc_attr_access;

	hp = (ddi_acc_impl_t *)handlep->ah_platform_private;
	switch (devacc_attr_access) {
	case DDI_FLAGERR_ACC:
	case DDI_CAUTIOUS_ACC:
		hp->ahi_get8 = i_ddi_caut_get8;
		hp->ahi_put8 = i_ddi_caut_put8;
		hp->ahi_rep_get8 = i_ddi_caut_rep_get8;
		hp->ahi_rep_put8 = i_ddi_caut_rep_put8;
		hp->ahi_get16 = i_ddi_caut_get16;
		hp->ahi_get32 = i_ddi_caut_get32;
		hp->ahi_put16 = i_ddi_caut_put16;
		hp->ahi_put32 = i_ddi_caut_put32;
		hp->ahi_rep_get16 = i_ddi_caut_rep_get16;
		hp->ahi_rep_get32 = i_ddi_caut_rep_get32;
		hp->ahi_rep_put16 = i_ddi_caut_rep_put16;
		hp->ahi_rep_put32 = i_ddi_caut_rep_put32;
		hp->ahi_get64 = i_ddi_caut_get64;
		hp->ahi_put64 = i_ddi_caut_put64;
		hp->ahi_rep_get64 = i_ddi_caut_rep_get64;
		hp->ahi_rep_put64 = i_ddi_caut_rep_put64;
		break;
	case DDI_DEFAULT_ACC:
		if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			hp->ahi_get8 = i_ddi_io_get8;
			hp->ahi_put8 = i_ddi_io_put8;
			hp->ahi_rep_get8 = i_ddi_io_rep_get8;
			hp->ahi_rep_put8 = i_ddi_io_rep_put8;

			/* temporarily set these 64-bit functions to no-ops */
			hp->ahi_get64 = i_ddi_io_get64;
			hp->ahi_put64 = i_ddi_io_put64;
			hp->ahi_rep_get64 = i_ddi_io_rep_get64;
			hp->ahi_rep_put64 = i_ddi_io_rep_put64;

			/*
			 * check for BIG endian access
			 */
			if (handlep->ah_acc.devacc_attr_endian_flags ==
				DDI_STRUCTURE_BE_ACC) {
				hp->ahi_get16 = i_ddi_io_swap_get16;
				hp->ahi_get32 = i_ddi_io_swap_get32;
				hp->ahi_put16 = i_ddi_io_swap_put16;
				hp->ahi_put32 = i_ddi_io_swap_put32;
				hp->ahi_rep_get16 = i_ddi_io_swap_rep_get16;
				hp->ahi_rep_get32 = i_ddi_io_swap_rep_get32;
				hp->ahi_rep_put16 = i_ddi_io_swap_rep_put16;
				hp->ahi_rep_put32 = i_ddi_io_swap_rep_put32;
			} else {
				hp->ahi_acc_attr |= DDI_ACCATTR_DIRECT;
				hp->ahi_get16 = i_ddi_io_get16;
				hp->ahi_get32 = i_ddi_io_get32;
				hp->ahi_put16 = i_ddi_io_put16;
				hp->ahi_put32 = i_ddi_io_put32;
				hp->ahi_rep_get16 = i_ddi_io_rep_get16;
				hp->ahi_rep_get32 = i_ddi_io_rep_get32;
				hp->ahi_rep_put16 = i_ddi_io_rep_put16;
				hp->ahi_rep_put32 = i_ddi_io_rep_put32;
			}

		} else if (hp->ahi_acc_attr & DDI_ACCATTR_CPU_VADDR) {

			hp->ahi_get8 = i_ddi_vaddr_get8;
			hp->ahi_put8 = i_ddi_vaddr_put8;
			hp->ahi_rep_get8 = i_ddi_vaddr_rep_get8;
			hp->ahi_rep_put8 = i_ddi_vaddr_rep_put8;

			/*
			 * check for BIG endian access
			 */
			if (handlep->ah_acc.devacc_attr_endian_flags ==
				DDI_STRUCTURE_BE_ACC) {

				hp->ahi_get16 = i_ddi_vaddr_swap_get16;
				hp->ahi_get32 = i_ddi_vaddr_swap_get32;
				hp->ahi_get64 = i_ddi_vaddr_swap_get64;
				hp->ahi_put16 = i_ddi_vaddr_swap_put16;
				hp->ahi_put32 = i_ddi_vaddr_swap_put32;
				hp->ahi_put64 = i_ddi_vaddr_swap_put64;
				hp->ahi_rep_get16 = i_ddi_vaddr_swap_rep_get16;
				hp->ahi_rep_get32 = i_ddi_vaddr_swap_rep_get32;
				hp->ahi_rep_get64 = i_ddi_vaddr_swap_rep_get64;
				hp->ahi_rep_put16 = i_ddi_vaddr_swap_rep_put16;
				hp->ahi_rep_put32 = i_ddi_vaddr_swap_rep_put32;
				hp->ahi_rep_put64 = i_ddi_vaddr_swap_rep_put64;
			} else {
				hp->ahi_acc_attr |= DDI_ACCATTR_DIRECT;
				hp->ahi_get16 = i_ddi_vaddr_get16;
				hp->ahi_get32 = i_ddi_vaddr_get32;
				hp->ahi_get64 = i_ddi_vaddr_get64;
				hp->ahi_put16 = i_ddi_vaddr_put16;
				hp->ahi_put32 = i_ddi_vaddr_put32;
				hp->ahi_put64 = i_ddi_vaddr_put64;
				hp->ahi_rep_get16 = i_ddi_vaddr_rep_get16;
				hp->ahi_rep_get32 = i_ddi_vaddr_rep_get32;
				hp->ahi_rep_get64 = i_ddi_vaddr_rep_get64;
				hp->ahi_rep_put16 = i_ddi_vaddr_rep_put16;
				hp->ahi_rep_put32 = i_ddi_vaddr_rep_put32;
				hp->ahi_rep_put64 = i_ddi_vaddr_rep_put64;
			}
		}
		break;
	}
	hp->ahi_fault_check = i_ddi_acc_fault_check;
	hp->ahi_fault_notify = i_ddi_acc_fault_notify;
	hp->ahi_fault = 0;
	impl_acc_err_init(handlep);
}
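
/*
 * Hedged sketch: a driver that wants big-endian register semantics
 * requests DDI_STRUCTURE_BE_ACC; with DDI_DEFAULT_ACC the code above
 * then installs the i_ddi_*_swap_* accessors, so ddi_get32() et al.
 * byte-swap transparently. mydrv_map_be_regs and register number 1
 * are illustrative assumptions.
 */
#include <sys/sunddi.h>

static int
mydrv_map_be_regs(dev_info_t *dip, caddr_t *regsp, ddi_acc_handle_t *hdlp)
{
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	return (ddi_regs_map_setup(dip, 1, regsp, 0, 0, &attr, hdlp));
}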
Example #7
/*
 * Flag every FLAGERR-capable handle in the specified cache (access or
 * DMA) with a nonfatal error; return DDI_FM_NONFATAL if at least one
 * handle was flagged, DDI_FM_UNKNOWN otherwise.
 */
int
ndi_fmc_entry_error_all(dev_info_t *dip, int flag, ddi_fm_error_t *derr)
{
	ndi_fmc_t *fcp = NULL;
	ndi_fmcentry_t *fep;
	struct i_ddi_fmhdl *fmhdl;
	int nonfatal = 0;

	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = DEVI(dip)->devi_fmhdl;
	ASSERT(fmhdl);

	if (flag == DMA_HANDLE && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
		fcp = fmhdl->fh_dma_cache;
		ASSERT(fcp);
	} else if (flag == ACC_HANDLE && DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
		fcp = fmhdl->fh_acc_cache;
		ASSERT(fcp);
	}

	if (fcp != NULL) {
		/*
		 * Check active resource entries
		 */
		mutex_enter(&fcp->fc_lock);
		for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
			ddi_fmcompare_t compare_func;

			compare_func = (flag == ACC_HANDLE) ?
			    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
			    fep->fce_resource) :
			    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
			    fep->fce_resource);

			if (compare_func == NULL) /* unbound or not FLAGERR */
				continue;

			/* Set the error for this resource handle */
			nonfatal++;

			if (flag == ACC_HANDLE) {
				ddi_acc_handle_t ap = fep->fce_resource;

				i_ddi_fm_acc_err_set(ap, derr->fme_ena,
				    DDI_FM_NONFATAL, DDI_FM_ERR_UNEXPECTED);
				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
				derr->fme_acc_handle = ap;
			} else {
				ddi_dma_handle_t dp = fep->fce_resource;

				i_ddi_fm_dma_err_set(dp, derr->fme_ena,
				    DDI_FM_NONFATAL, DDI_FM_ERR_UNEXPECTED);
				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
				derr->fme_dma_handle = dp;
			}
		}
		mutex_exit(&fcp->fc_lock);
	}
	return (nonfatal ? DDI_FM_NONFATAL : DDI_FM_UNKNOWN);
}
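
/*
 * Hedged sketch: a bus error handler that cannot attribute a fault to
 * a single transaction can flag every cached handle of a child and
 * fold the results. my_bus_err_handler is hypothetical; only
 * ndi_fmc_entry_error_all() and the DDI_FM_* values come from the
 * source.
 */
#include <sys/ndifm.h>

static int
my_bus_err_handler(dev_info_t *child, ddi_fm_error_t *derr)
{
	int acc, dma;

	acc = ndi_fmc_entry_error_all(child, ACC_HANDLE, derr);
	dma = ndi_fmc_entry_error_all(child, DMA_HANDLE, derr);

	return ((acc == DDI_FM_NONFATAL || dma == DDI_FM_NONFATAL) ?
	    DDI_FM_NONFATAL : DDI_FM_UNKNOWN);
}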
Example #8
/*
 * Compare the captured bus error state against each handle in the
 * specified cache, record the error status on any matching handle,
 * and return the most severe status found.
 */
int
ndi_fmc_entry_error(dev_info_t *dip, int flag, ddi_fm_error_t *derr,
    const void *bus_err_state)
{
	int status, fatal = 0, nonfatal = 0;
	ndi_fmc_t *fcp = NULL;
	ndi_fmcentry_t *fep;
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = DEVI(dip)->devi_fmhdl;
	ASSERT(fmhdl);
	status = DDI_FM_UNKNOWN;

	if (flag == DMA_HANDLE && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
		fcp = fmhdl->fh_dma_cache;
		ASSERT(fcp);
	} else if (flag == ACC_HANDLE && DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
		fcp = fmhdl->fh_acc_cache;
		ASSERT(fcp);
	}

	if (fcp != NULL) {

		/*
		 * Check active resource entries
		 */
		mutex_enter(&fcp->fc_lock);
		for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
			ddi_fmcompare_t compare_func;

			/*
			 * Compare captured error state with handle
			 * resources.  During the comparison and
			 * subsequent error handling, we block
			 * attempts to free the cache entry.
			 */
			compare_func = (flag == ACC_HANDLE) ?
			    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
			    fep->fce_resource) :
			    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
			    fep->fce_resource);

			if (compare_func == NULL) /* unbound or not FLAGERR */
				continue;

			status = compare_func(dip, fep->fce_resource,
			    bus_err_state, fep->fce_bus_specific);
			if (status == DDI_FM_UNKNOWN || status == DDI_FM_OK)
				continue;

			if (status == DDI_FM_FATAL)
				++fatal;
			else if (status == DDI_FM_NONFATAL)
				++nonfatal;

			/* Set the error for this resource handle */
			if (flag == ACC_HANDLE) {
				ddi_acc_handle_t ap = fep->fce_resource;

				i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
				    DDI_FM_ERR_UNEXPECTED);
				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
				derr->fme_acc_handle = ap;
			} else {
				ddi_dma_handle_t dp = fep->fce_resource;

				i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
				    DDI_FM_ERR_UNEXPECTED);
				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
				derr->fme_dma_handle = dp;
			}
		}
		mutex_exit(&fcp->fc_lock);
	}
	return (fatal ? DDI_FM_FATAL : nonfatal ? DDI_FM_NONFATAL :
	    DDI_FM_UNKNOWN);
}
Example #9
/*
 * Remove an entry from the specified cache of access or DMA mappings.
 *
 * This function must be called at or below LOCK_LEVEL.
 */
void
ndi_fmc_remove(dev_info_t *dip, int flag, const void *resource)
{
	ndi_fmc_t *fcp;
	ndi_fmcentry_t *fep;
	struct dev_info *devi = DEVI(dip);
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(devi);
	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = devi->devi_fmhdl;
	if (fmhdl == NULL) {
		return;
	}

	/* Find cache entry pointer for this resource */
	if (flag == DMA_HANDLE) {
		if (!DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
			return;
		}
		fcp = fmhdl->fh_dma_cache;

		ASSERT(fcp);

		mutex_enter(&fcp->fc_lock);
		fep = ((ddi_dma_impl_t *)resource)->dmai_error.err_fep;
		((ddi_dma_impl_t *)resource)->dmai_error.err_fep = NULL;
	} else if (flag == ACC_HANDLE) {
		if (!DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
			i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL,
			    DDI_NOSLEEP);
			return;
		}
		fcp = fmhdl->fh_acc_cache;

		ASSERT(fcp);

		mutex_enter(&fcp->fc_lock);
		fep = ((ddi_acc_impl_t *)resource)->ahi_err->err_fep;
		((ddi_acc_impl_t *)resource)->ahi_err->err_fep = NULL;
	} else {
		return;
	}

	/*
	 * Resource not in cache, return
	 */
	if (fep == NULL) {
		mutex_exit(&fcp->fc_lock);
		atomic_inc_64(&fmhdl->fh_kstat.fek_fmc_miss.value.ui64);
		return;
	}

	/*
	 * Updates to the FM cache pointers must be made while holding
	 * fc_lock (taken above) to synchronize with ndi_fmc_insert()
	 * and with error traversal in ndi_fmc_entry_error().
	 */
	if (fep == fcp->fc_head)
		fcp->fc_head = fep->fce_next;
	else
		fep->fce_prev->fce_next = fep->fce_next;
	if (fep == fcp->fc_tail)
		fcp->fc_tail = fep->fce_prev;
	else
		fep->fce_next->fce_prev = fep->fce_prev;
	mutex_exit(&fcp->fc_lock);

	kmem_cache_free(ndi_fm_entry_cache, fep);
}