Example 1: ppb_removechild() tears down a child of the PCI-PCI bridge nexus, freeing either the PCIe state or the parent-private data before stripping the node back to prototype form.
static void
ppb_removechild(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(dip)));

	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		pcie_fini_dom(dip);
		pcie_fini_cfghdl(dip);
	} else if ((pdptr = ddi_get_parent_data(dip)) != NULL) {
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));
		ddi_set_parent_data(dip, NULL);
	}
	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	impl_rem_dev_props(dip);
}
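
A note on the size in the kmem_free() call above: it mirrors the way the data was allocated. The nexus's initchild path typically allocates the ddi_parent_private_data structure and one trailing struct intrspec as a single block. A minimal sketch of that setup side, assuming a one-interrupt child (the helper name is hypothetical, not the verbatim initchild code):

static void
example_set_parent_data(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;

	/*
	 * One allocation covers both the structure and a trailing
	 * intrspec, which is why the teardown frees them together.
	 */
	pdptr = kmem_zalloc(sizeof (*pdptr) + sizeof (struct intrspec),
	    KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;

	ddi_set_parent_data(child, pdptr);
}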
Example 2: pcieb_plat_uninitchild() frees a child's parent-private data and clears the slot.
void
pcieb_plat_uninitchild(dev_info_t *child)
{
	struct ddi_parent_private_data	*pdptr;

	if ((pdptr = ddi_get_parent_data(child)) != NULL)
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));

	ddi_set_parent_data(child, NULL);
}
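
This is the same free-and-clear idiom as Example 1's non-PCIe branch, except that the slot is cleared unconditionally: if no parent data was ever set, the hook is a harmless no-op.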
Example 3: isa_get_ispec() looks up a child's interrupt specification in its parent-private data.
static struct intrspec *
isa_get_ispec(dev_info_t *rdip, int inum)
{
	struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);

	/* Validate the interrupt number */
	if (inum >= pdp->par_nintr)
		return (NULL);

	/* Get the interrupt structure pointer and return that */
	return ((struct intrspec *)&pdp->par_intr[inum]);
}
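
Note that isa_get_ispec() dereferences pdp without a NULL check, so it must only be called for children whose parent data has been initialized. A hedged sketch of a typical caller, an interrupt op translating an interrupt number into a vector (the surrounding handler is illustrative; hdlp stands for a ddi_intr_handle_impl_t pointer):

	struct intrspec *ispec;

	/* Look up the child's intrspec; fail if the inum is out of range. */
	if ((ispec = isa_get_ispec(rdip, hdlp->ih_inum)) == NULL)
		return (DDI_FAILURE);
	hdlp->ih_vector = ispec->intrspec_vec;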
Example 4: pci_removechild() applies the same free-and-clear teardown for the generic PCI nexus.
static int
pci_removechild(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	if ((pdptr = ddi_get_parent_data(dip)) != NULL) {
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));
		ddi_set_parent_data(dip, NULL);
	}
	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	impl_rem_dev_props(dip);

	return (DDI_SUCCESS);
}
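
This is the generic PCI nexus version of the same teardown: free the combined pdptr/intrspec allocation, clear the slot, then drop the unit address, minor nodes, and properties so the node reverts to prototype form.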
Example 5: px_fdvma_reserve() sets up a fast DVMA reservation; note the unusual use of the parent-data slot as a flag rather than a pointer.
int
px_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, px_t *px_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	px_dvma_addr_t dvma_pg;
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & MMU_PAGE_MASK;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	DBG(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < mmu_p->mmu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Check the size of the request.
	 */
	npages = dmareq->dmar_object.dmao_size;
	if (npages > mmu_p->mmu_dvma_reserve)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (px_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = MMU_BTOP((ulong_t)vmem_xalloc(mmu_p->mmu_dvma_map,
	    MMU_PTOB(npages), MMU_PAGE_SIZE, 0,
	    counter_max, (void *)lo, (void *)(hi + 1),
	    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		kmem_free(mp, sizeof (px_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}
	mmu_p->mmu_dvma_reserve -= npages;

	/*
	 * Create the fast dvma request structure.
	 */
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_alloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)px_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS | DDI_DMA_READ | DMP_NOSYNC;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = MMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * MMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	/*
	 * The bdf protection value is set to immediate child
	 * at first. It gets modified by switch/bridge drivers
	 * as the code traverses down the fabric topology.
	 *
	 * XXX No IOMMU protection for broken devices.
	 */
	ASSERT((intptr_t)ddi_get_parent_data(rdip) >> 1 == 0);
	mp->dmai_bdf = ((intptr_t)ddi_get_parent_data(rdip) == 1) ?
	    PCIE_INVALID_BDF : pcie_get_bdf_for_dma_xfer(dip, rdip);

	DBG(DBG_DMA_CTL, dip,
	    "DDI_DMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
	    mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}
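
Example 5 stands apart from the others: here the parent-data slot carries a one-bit flag rather than a pointer. The ASSERT insists the stored value is 0 or 1, and 1 selects PCIE_INVALID_BDF, i.e. no IOMMU/BDF protection for the transfer. A hedged sketch of the producing side, assuming some fabric code marks a broken device this way:

	/* Hypothetical: flag rdip as exempt from BDF-based protection. */
	ddi_set_parent_data(rdip, (void *)(intptr_t)1);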
Example 6: bd_attach() retrieves the bd_handle_t that the parent driver stored as the child's parent data before attach.
static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		inst;
	bd_handle_t	hdl;
	bd_t		*bd;
	bd_drive_t	drive;
	int		rv;
	char		name[16];
	char		kcache[32];

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	hdl = ddi_get_parent_data(dip);

	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			bd->d_maxxfer = 1024 * 1024;
		}
	}
	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = 9;	/* 512 bytes, to start */

	if (bd->d_maxxfer % DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!", name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	bd->d_dip = dip;
	bd->d_handle = hdl;
	hdl->h_bd = bd;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);

	list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));
	list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_iomutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	bzero(&drive, sizeof (drive));
	bd->d_ops.o_drive_info(bd->d_private, &drive);
	bd->d_qsize = drive.d_qsize;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;

	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable,
	    drive.d_lun >= 0 ? DDI_NT_BLOCK_CHAN : DDI_NT_BLOCK,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		cmlb_free_handle(&bd->d_cmlbh);
		kmem_cache_destroy(bd->d_cache);
		mutex_destroy(&bd->d_iomutex);
		mutex_destroy(&bd->d_ocmutex);
		mutex_destroy(&bd->d_statemutex);
		cv_destroy(&bd->d_statecv);
		list_destroy(&bd->d_waitq);
		list_destroy(&bd->d_runq);
		if (bd->d_ksp != NULL) {
			kstat_delete(bd->d_ksp);
			bd->d_ksp = NULL;
		} else {
			kmem_free(bd->d_kiop, sizeof (kstat_io_t));
		}
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).  Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
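
bd_attach() is the consuming side of the blkdev contract: the parent driver is expected to hang a bd_handle_t on the child node before onlining it. A sketch of that producing side, using standard NDI calls; the helper name and error handling are illustrative, and "blkdev" is assumed as the child node name:

static int
example_attach_blkdev_child(dev_info_t *parent, bd_handle_t hdl)
{
	dev_info_t *child;

	ndi_devi_alloc_sleep(parent, "blkdev", (pnode_t)DEVI_SID_NODEID,
	    &child);

	/* Stash the handle where bd_attach() will look for it. */
	ddi_set_parent_data(child, hdl);

	if (ndi_devi_online(child, 0) != NDI_SUCCESS) {
		ddi_set_parent_data(child, NULL);
		(void) ndi_devi_free(child);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}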
Example 7: isa_ctlops() answers DDI_CTLOPS_NREGS and DDI_CTLOPS_REGSIZE from the register data held in the child's parent-private data.
/*ARGSUSED*/
static int
isa_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	int rn;
	struct ddi_parent_private_data *pdp;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?ISA-device: %s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		/*
		 * older drivers aren't expecting the "standard" device
		 * node format used by the hardware nodes.  these drivers
		 * only expect their own properties set in their driver.conf
		 * files.  so they tell us not to call them with hardware
		 * nodes by setting the property "ignore-hardware-nodes".
		 */
		if (old_driver((dev_info_t *)arg)) {
			return (DDI_NOT_WELL_FORMED);
		}

		return (isa_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_SIDDEV:
		if (ndi_dev_is_persistent_node(rdip))
			return (DDI_SUCCESS);
		/*
		 * All ISA devices need to do confirming probes
		 * unless they are PnP ISA.
		 */
		if (is_pnpisa(rdip))
			return (DDI_SUCCESS);
		else
			return (DDI_FAILURE);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);

		if ((pdp = ddi_get_parent_data(rdip)) == NULL)
			return (DDI_FAILURE);

		if (ctlop == DDI_CTLOPS_NREGS) {
			*(int *)result = pdp->par_nreg;
		} else {
			rn = *(int *)arg;
			if (rn >= pdp->par_nreg)
				return (DDI_FAILURE);
			*(off_t *)result = (off_t)pdp->par_reg[rn].regspec_size;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		return (DDI_FAILURE);

	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}
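
Taken together, the examples show one convention: the nexus owns the parent-data slot. It installs per-child state at initchild or handle-attach time with ddi_set_parent_data(), bus ops and children read it back with ddi_get_parent_data() (Examples 3, 6 and 7), and the uninitchild/removechild path frees the memory and clears the pointer (Examples 1, 2 and 4). Example 5 is the lone exception, smuggling a flag bit instead of a pointer through the slot.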