Code Example #1
static void
rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(rgep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(rgep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(rgep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(rgep->io_handle, regaddr, regval);
		break;
	}
}
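The driver pairs this poke routine with a peek routine that dispatches on the same pp_acc_size field, using the ddi_get* family instead. A minimal sketch, assuming the same rge_t and rge_peekpoke_t layout as above (the default case is added here defensively):

static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;

	default:
		regval = 0;
		break;
	}

	ppd->pp_acc_data = regval;
}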
Code Example #2
void
ipw2200_csr_put32(struct ipw2200_softc *sc, uint32_t off,
	uint32_t val)
{
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)((uintptr_t)sc->sc_regs + off), val);
}
Code Example #3
File: virtio.c Project: libkeiser/illumos-nexenta
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
Code Example #4
File: VirtioPci-solaris.c Project: OSLL/vboxhsm
/**
 * Set guest supported features.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param u32Features       Mask of guest supported features.
 */
static void VirtioPciSetFeatures(PVIRTIODEVICE pDevice, uint32_t u32Features)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciSetFeatures pDevice=%p\n", pDevice));
    virtio_pci_t *pPciData = pDevice->pvHyper;
    AssertReturnVoid(pPciData);

    ddi_put32(pPciData->hIO, (uint32_t *)(pPciData->addrIOBase + VIRTIO_PCI_GUEST_FEATURES), u32Features);
}
Code Example #5
static void
rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
{
	RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)rgep, regno, data));

	ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
}
Code Example #6
File: virtio.c Project: libkeiser/illumos-nexenta
void
virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
    uint32_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}
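The same file's 4-byte config read is the mirror image of this write; a minimal sketch under the same assumptions (sc_ioh, sc_io_addr and sc_config_offset as above):

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}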
Code Example #7
File: fipe_pm.c Project: bahamas10/openzfs
static int
fipe_ioat_trigger(void)
{
	uint16_t ctrl;
	uint32_t err;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/* Check channel in use flag. */
	ctrl = ddi_get16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL));
	if (ctrl & 0x100) {
		/*
		 * Channel is in use by somebody else. IOAT driver may have
		 * been loaded, forbid fipe from accessing IOAT hardware
		 * anymore.
		 */
		fipe_ioat_ctrl.ioat_ready = B_FALSE;
		fipe_ioat_ctrl.ioat_failed = B_TRUE;
		FIPE_KSTAT_INC(ioat_start_fail_cnt);
		return (-1);
	} else {
		/* Set channel in use flag. */
		ddi_put16(handle,
		    (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0x100);
	}

	/* Write command address. */
	ddi_put32(handle,
	    (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_LO),
	    (uint32_t)fipe_ioat_ctrl.ioat_cmd_physaddr);
	ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_HI),
	    (uint32_t)(fipe_ioat_ctrl.ioat_cmd_physaddr >> 32));

	/* Check and clear error flags. */
	err = ddi_get32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR));
	if (err != 0) {
		ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR), err);
	}

	/* Start channel. */
	ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x1);

	return (0);
}
Code Example #8
void
pci_config_putl(ddi_acc_handle_t handle, off_t offset, uint32_t value)
{
	caddr_t	cfgaddr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(handle);
	cfgaddr = hp->ah_addr + offset;
	ddi_put32(handle, (uint32_t *)cfgaddr, value);
}
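The read side resolves the config-space address through impl_acc_hdl_get() in exactly the same way and then calls ddi_get32; a minimal sketch:

uint32_t
pci_config_getl(ddi_acc_handle_t handle, off_t offset)
{
	caddr_t	cfgaddr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(handle);
	cfgaddr = hp->ah_addr + offset;
	return (ddi_get32(handle, (uint32_t *)cfgaddr));
}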
Code Example #9
File: sgsbbc.c Project: andreiw/polaris
void
sbbc_disable_intr(sbbc_softstate_t *softsp)
{
	uint32_t	*pci_intr_enable_reg;

	/*
	 * Disable Interrupts now, turn off both INT#A lines
	 */
	pci_intr_enable_reg =  (uint32_t *)((char *)softsp->sbbc_regs +
		SBBC_PCI_INT_ENABLE);
	ddi_put32(softsp->sbbc_reg_handle1, pci_intr_enable_reg, 0);
}
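The enable path writes SBBC_PCI_ENABLE_INT_A to the same register; Example #15 below performs this write inline in its DDI_RESUME case. As a standalone helper it would look roughly like this (a sketch assuming the same softstate fields):

void
sbbc_enable_intr(sbbc_softstate_t *softsp)
{
	uint32_t	*pci_intr_enable_reg;

	/*
	 * Enable Interrupts now, turn on both INT#A lines
	 */
	pci_intr_enable_reg = (uint32_t *)((char *)softsp->sbbc_regs +
	    SBBC_PCI_INT_ENABLE);
	ddi_put32(softsp->sbbc_reg_handle1, pci_intr_enable_reg,
	    (uint32_t)SBBC_PCI_ENABLE_INT_A);
}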
Code Example #10
void quantis_reg_set(quantis_pci_device* qdev,
                     quantis_register reg,
                     quantis_register_value value)
{
  char msg[MAX_MSG_LEN];
  LOG_DEBUG2("In quantis_reg_set with reg=%d and value=%d\n", reg, value);
  if (reg % 4 !=  0)
  {
    snprintf(msg,
             MAX_MSG_LEN,
             "Offset (%d) in the registers array is not divisible by 4. This could crash the driver.\n",
             reg);
    QUANTIS_WARN(msg);
  }
  ddi_put32(qdev->regs_handle,
            (quantis_register_value *)(qdev->regs + reg),
            value);
}
Code Example #11
File: VirtioPci-solaris.c Project: OSLL/vboxhsm
/**
 * Virtio Pci put queue routine. Deactivates the queue and frees its resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);
    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
Code Example #12
File: virtio.c Project: libkeiser/illumos-nexenta
/*
 * Negotiate features, save the result in sc->sc_features
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	host_features = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
	    host_features, guest_features);

	features = host_features & guest_features;
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
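A caller typically passes the feature bits it can support and then tests which ones survived the intersection. A hypothetical fragment (VIRTIO_F_RING_INDIRECT_DESC and virtio_has_feature() appear in Example #14; the surrounding code is illustrative):

	(void) virtio_negotiate_features(sc, VIRTIO_F_RING_INDIRECT_DESC);

	if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC)) {
		/* the device accepted indirect descriptors */
	}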
Code Example #13
File: audio1575.c Project: apprisi/illumos-gate
/*
 * audio1575_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's devinfo
 *	int		num	M1575_PLAY or M1575_REC
 *	uint8_t		nchan	Number of channels (2 = stereo, 6 = 5.1, etc.)
 *
 * Returns:
 *	DDI_SUCCESS		Registers successfully mapped
 *	DDI_FAILURE		Registers not successfully mapped
 */
static int
audio1575_alloc_port(audio1575_state_t *statep, int num, uint8_t nchan)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	audio_dev_t		*adev;
	audio1575_port_t	*port;
	uint32_t		*kaddr;
	int			rc;
	dev_info_t		*dip;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	statep->ports[num] = port;
	port->num = num;
	port->statep = statep;
	port->nchan = nchan;

	if (num == M1575_REC) {
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
	} else {
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
	}

	/*
	 * We use one big sample area.  The sample area must be larger
	 * than about 1.5 framework fragment sizes.  (Currently 480 *
	 * 1.5 = 720 frames.)  This is necessary so that we don't need
	 * an interrupt service routine of our own to keep the last
	 * valid index reasonably up to date.
	 */
	port->nframes = 2048;
	port->samp_size = port->nframes * port->nchan * sizeof (int16_t);

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * Now allocate DMA memory for the buffer descriptor list.
	 * We allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * We allocate all buffer descriptor lists in contiguous DMA memory.
	 */
	port->bdl_size = sizeof (m1575_bd_entry_t) * M1575_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * Wire up the BD list.  We do this *before* binding the BD list
	 * so that we don't have to do an extra ddi_dma_sync.
	 */
	kaddr = (void *)port->bdl_kaddr;
	for (int i = 0; i < M1575_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, kaddr, port->samp_paddr);
		kaddr++;

		/* set size in frames, and enable IOC interrupt */
		ddi_put32(port->bdl_acch, kaddr,
		    ((port->samp_size / sizeof (int16_t)) | (1U << 31)));
		kaddr++;
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	port->engine = audio_engine_alloc(&audio1575_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Code Example #14
File: virtio.c Project: libkeiser/illumos-nexenta
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
Code Example #15
File: sgsbbc.c Project: andreiw/polaris
static int
sbbc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int			instance;
	sbbc_softstate_t	*softsp;
	uint32_t		*pci_intr_enable_reg;
	int			len;
#ifdef	DEBUG
	char			name[8];
#endif	/* DEBUG */

	instance = ddi_get_instance(devi);

	switch (cmd) {
	case DDI_ATTACH:

		if (ddi_soft_state_zalloc(sbbcp, instance) != 0)
			return (DDI_FAILURE);

		softsp = ddi_get_soft_state(sbbcp, instance);
		softsp->sbbc_instance = instance;

		/*
		 * Set the dip in the soft state
		 * And get interrupt cookies and initialize the
		 * per instance mutex.
		 */
		softsp_init(softsp, devi);


		/*
		 * Verify that an 'interrupts' property exists for
		 * this device. If not, this instance will be ignored.
		 */
		if (ddi_getproplen(DDI_DEV_T_ANY, softsp->dip,
			DDI_PROP_DONTPASS, "interrupts",
			&len) != DDI_PROP_SUCCESS) {
			SBBC_ERR1(CE_WARN, "No 'interrupts' property for the "
					"SBBC instance %d\n", instance);
			return (DDI_FAILURE);
		}
		/*
		 * Add this instance to the sbbc chosen iosram list
		 * so that it can be used for tunnel switch.
		 */
		mutex_enter(&chosen_lock);
		softsp->sbbc_state = SBBC_STATE_INIT;
		sbbc_add_instance(softsp);

		/*
		 * If this is the chosen IOSRAM and there is no master IOSRAM
		 * yet, then let's set this instance as the master.
		 * If there is already a master due to a previous tunnel
		 * switch, keep it as is even though this is the chosen one.
		 */
		if (sgsbbc_iosram_is_chosen(softsp)) {
			ASSERT(master_iosram);
			softsp->iosram = master_iosram;
			master_iosram->sgsbbc = softsp;

			/* Do 'chosen' init only */
			sbbc_chosen_init(softsp);
		}

		mutex_exit(&chosen_lock);
#ifdef	DEBUG
		(void) sprintf(name, "sbbc%d", instance);

		if (ddi_create_minor_node(devi, name, S_IFCHR, instance,
			NULL, NULL) == DDI_FAILURE) {
			mutex_destroy(&softsp->sbbc_lock);
			ddi_remove_minor_node(devi, NULL);
			ddi_soft_state_free(sbbcp, instance);
			return (DDI_FAILURE);
		}
#endif	/* DEBUG */

		ddi_report_dev(devi);

		return (DDI_SUCCESS);

	case DDI_RESUME:

		if (!(softsp = ddi_get_soft_state(sbbcp, instance)))
			return (DDI_FAILURE);

		mutex_enter(&softsp->sbbc_lock);
		if ((softsp->suspended == TRUE) && (softsp->chosen == TRUE)) {
			/*
			 * Enable Interrupts now, turn on both INT#A lines
			 */
			pci_intr_enable_reg =  (uint32_t *)
					((char *)softsp->sbbc_regs +
						SBBC_PCI_INT_ENABLE);

			ddi_put32(softsp->sbbc_reg_handle1,
				pci_intr_enable_reg,
				(uint32_t)SBBC_PCI_ENABLE_INT_A);

			/*
			 * Reset intr_in_enabled to the original value
			 * so the SC can send us interrupt.
			 */
			if (iosram_write(SBBC_SC_INTR_ENABLED_KEY,
				0, (caddr_t)&intr_in_enabled,
				sizeof (intr_in_enabled))) {

				mutex_exit(&softsp->sbbc_lock);
				return (DDI_FAILURE);
			}
		}
		softsp->suspended = FALSE;

		mutex_exit(&softsp->sbbc_lock);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
Code Example #16
File: adapter.c Project: mikess/illumos-gate
void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
    /* LINTED: E_BAD_PTR_CAST_ALIGN */
    ddi_put32(sc->regh, (uint32_t *)(sc->regp + reg), val);
}
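The matching read accessor is the one-line mirror, with the same lint suppression for the cast; a sketch under the same field names:

uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
    /* LINTED: E_BAD_PTR_CAST_ALIGN */
    return (ddi_get32(sc->regh, (uint32_t *)(sc->regp + reg)));
}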
Code Example #17
File: sgsbbc.c Project: andreiw/polaris
static int
sbbc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	sbbc_softstate_t	*softsp;
	int			instance;
	uint32_t		*pci_intr_enable_reg;
	int			rc = DDI_SUCCESS;

	instance = ddi_get_instance(devi);

	if (!(softsp = ddi_get_soft_state(sbbcp, instance)))
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		mutex_enter(&chosen_lock);
		softsp->sbbc_state |= SBBC_STATE_DETACH;
		mutex_exit(&chosen_lock);

		/* only tunnel switch the instance with iosram chosen */
		if (softsp->chosen == TRUE) {
			if (sgsbbc_iosram_switchfrom(softsp) == DDI_FAILURE) {
				SBBC_ERR(CE_WARN, "Cannot unconfigure: "
				    "tunnel switch failed\n");
				return (DDI_FAILURE);
			}
		}

		/* Adjust linked list */
		mutex_enter(&chosen_lock);
		sbbc_remove_instance(softsp);
		mutex_exit(&chosen_lock);

		sbbc_unmap_regs(softsp);
		mutex_destroy(&softsp->sbbc_lock);
		ddi_soft_state_free(sbbcp, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		mutex_enter(&softsp->sbbc_lock);

		if ((softsp->suspended == FALSE) && (softsp->chosen == TRUE)) {
			uint32_t	tmp_intr_enabled = 0;

			/*
			 * Disable Interrupts now, turn OFF both INT#A lines
			 */
			pci_intr_enable_reg =  (uint32_t *)
						((char *)softsp->sbbc_regs +
							SBBC_PCI_INT_ENABLE);

			ddi_put32(softsp->sbbc_reg_handle1,
				pci_intr_enable_reg, 0);

			/*
			 * Set intr_in_enabled to 0 so the SC won't send
			 * us interrupt.
			 */
			rc = iosram_read(SBBC_SC_INTR_ENABLED_KEY,
				0, (caddr_t)&intr_in_enabled,
				sizeof (intr_in_enabled));

			if (rc) {
				mutex_exit(&softsp->sbbc_lock);
				return (DDI_FAILURE);
			}

			rc = iosram_write(SBBC_SC_INTR_ENABLED_KEY,
				0, (caddr_t)&tmp_intr_enabled,
				sizeof (tmp_intr_enabled));

			if (rc) {
				mutex_exit(&softsp->sbbc_lock);
				return (DDI_FAILURE);
			}
		}
		softsp->suspended = TRUE;

		mutex_exit(&softsp->sbbc_lock);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

}
Code Example #18
static int
acebus_config(ebus_devstate_t *ebus_p)
{
	ddi_acc_handle_t conf_handle;
	uint16_t comm;
#ifdef	ACEBUS_HOTPLUG
	int tcr_reg;
	caddr_t csr_io;
	ddi_device_acc_attr_t csr_attr = {   /* CSR map attributes */
		DDI_DEVICE_ATTR_V0,
		DDI_STRUCTURE_LE_ACC,
		DDI_STRICTORDER_ACC
	};
	ddi_acc_handle_t csr_handle;
#endif

	/*
	 * Make sure the master enable and memory access enable
	 * bits are set in the config command register.
	 */
	if (pci_config_setup(ebus_p->dip, &conf_handle) != DDI_SUCCESS)
		return (0);

	comm = pci_config_get16(conf_handle, PCI_CONF_COMM);
#ifdef DEBUG
	DBG1(D_ATTACH, ebus_p, "command register was 0x%x\n", comm);
#endif
	comm |= (PCI_COMM_ME|PCI_COMM_MAE|PCI_COMM_SERR_ENABLE|
	    PCI_COMM_PARITY_DETECT);
	pci_config_put16(conf_handle, PCI_CONF_COMM, comm);
#ifdef DEBUG
	DBG1(D_MAP, ebus_p, "command register is now 0x%x\n",
	    pci_config_get16(conf_handle, PCI_CONF_COMM));
#endif
	pci_config_put8(conf_handle, PCI_CONF_CACHE_LINESZ,
	    (uchar_t)acebus_cache_line_size);
	pci_config_put8(conf_handle, PCI_CONF_LATENCY_TIMER,
	    (uchar_t)acebus_latency_timer);
	pci_config_teardown(&conf_handle);

#ifdef	ACEBUS_HOTPLUG
	if (acebus_update_props(ebus_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not update special properties.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		return (0);
	}

	if (ddi_regs_map_setup(ebus_p->dip, CSR_IO_RINDEX,
	    (caddr_t *)&csr_io, 0, CSR_SIZE, &csr_attr,
	    &csr_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not map Ebus CSR.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
	}
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
		DBG2(D_ATTACH, ebus_p, "pmd-aux=%x, freq-aux=%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    PMD_AUX_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    FREQ_AUX_OFF)));
#ifdef ACEBUS_DEBUG
		for (comm = 0; comm < 4; comm++)
			prom_printf("dcsr%d=%x, dacr%d=%x, dbcr%d=%x\n", comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm))), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+4)), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+8)));
#endif
	} /* acebus_debug_flags */
#endif
	/* If TCR registers are not initialized, initialize them here */
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR1_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR1_OFF),
		    TCR1_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR2_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR2_OFF),
		    TCR2_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR3_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR3_OFF),
		    TCR3_REGVAL);
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "wrote tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
	}
#endif

	ddi_regs_map_free(&csr_handle);
#endif	/* ACEBUS_HOTPLUG */
	return (1);	/* return success */
}
Code Example #19
File: sgsbbc.c Project: andreiw/polaris
/*
 * SBBC Interrupt Handler
 *
 * Check the SBBC Port Interrupt Status
 * register to verify that it's our interrupt.
 * If yes, clear the register.
 *
 * Then read the 'interrupt reason' field from SRAM,
 * this triggers the appropriate soft_intr handler
 */
uint_t
sbbc_intr_handler(caddr_t arg)
{
	sbbc_softstate_t	*softsp = (sbbc_softstate_t *)arg;
	uint32_t		*port_int_reg;
	volatile uint32_t	port_int_status;
	volatile uint32_t	intr_reason;
	uint32_t		intr_enabled;
	sbbc_intrs_t		*intr;
	int			i, intr_mask;
	struct tunnel_key	tunnel_key;
	ddi_acc_handle_t	intr_in_handle;
	uint32_t		*intr_in_reason;

	if (softsp == (sbbc_softstate_t *)NULL) {

		return (DDI_INTR_UNCLAIMED);
	}

	mutex_enter(&softsp->sbbc_lock);

	if (softsp->port_int_regs == NULL) {
		mutex_exit(&softsp->sbbc_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Normally if port_int_status is 0, we assume it is not
	 * our interrupt.  However, we don't want to miss the
	 * ones that come in during tunnel switch.  Therefore,
	 * we always check the interrupt reason bits in IOSRAM
	 * to be sure.
	 */
	port_int_reg = softsp->port_int_regs;

	port_int_status = ddi_get32(softsp->sbbc_reg_handle1, port_int_reg);

	/*
	 * Generate a softint for each interrupt
	 * bit set in the intr_in_reason field in SRAM
	 * that has a corresponding bit set in the
	 * intr_in_enabled field in SRAM
	 */

	if (iosram_read(SBBC_SC_INTR_ENABLED_KEY, 0,
		(caddr_t)&intr_enabled, sizeof (intr_enabled))) {

		goto intr_handler_exit;
	}

	tunnel_key = master_iosram->tunnel->tunnel_keys[SBBC_SC_INTR_KEY];
	intr_in_reason = (uint32_t *)tunnel_key.base;
	intr_in_handle = tunnel_key.reg_handle;

	intr_reason = ddi_get32(intr_in_handle, intr_in_reason);

	SGSBBC_DBG_INTR(CE_CONT, "intr_reason = %x\n", intr_reason);

	intr_reason &= intr_enabled;

	for (i = 0; i < SBBC_MAX_INTRS; i++) {
		intr_mask = (1 << i);
		if (intr_reason & intr_mask) {
			intr = &softsp->intr_hdlrs[i];
			if ((intr != NULL) &&
				(intr->sbbc_intr_id != 0)) {
				/*
				 * XXXX
				 * The model we agree on with a handler
				 * is that it runs until it has
				 * exhausted all work. To avoid
				 * triggering it again, it passes
				 * a state flag and lock when registering.
				 * We check the flag; if it is idle,
				 * we trigger.
				 * The interrupt handler should do
				 *   intr_func()
				 *	mutex_enter(sbbc_intr_lock);
				 *	sbbc_intr_state = RUNNING;
				 *	mutex_exit(sbbc_intr_lock);
				 *	  ..........
				 *	  ..........
				 *	  ..........
				 *	mutex_enter(sbbc_intr_lock);
				 *	sbbc_intr_state = IDLE;
				 *	mutex_exit(sbbc_intr_lock);
				 *
				 * XXXX
				 */
				mutex_enter(intr->sbbc_intr_lock);
				if (*(intr->sbbc_intr_state) ==
					SBBC_INTR_IDLE) {
					mutex_exit(intr->sbbc_intr_lock);
					ddi_trigger_softintr(
						intr->sbbc_intr_id);
				} else {
					/*
					 * The handler is running
					 */
					mutex_exit(intr->sbbc_intr_lock);
				}
				intr_reason &= ~intr_mask;
				/*
				 * Clear the corresponding reason bit in SRAM
				 *
				 * Since there is no interlocking between
				 * Solaris and the SC when writing to SRAM,
				 * it is possible for the SC to set another
				 * bit in the interrupt reason field while
				 * we are handling the current interrupt.
				 * To minimize the window in which an
				 * additional bit can be set, reading
				 * and writing the interrupt reason
				 * in SRAM must be as close as possible.
				 */
				ddi_put32(intr_in_handle, intr_in_reason,
					ddi_get32(intr_in_handle,
					intr_in_reason) & ~intr_mask);
			}
		}
		if (intr_reason == 0)	/* No more interrupts to be processed */
			break;
	}

	/*
	 * Clear the Interrupt Status Register (RW1C)
	 */
	ddi_put32(softsp->sbbc_reg_handle1, port_int_reg, port_int_status);

	port_int_status = ddi_get32(softsp->sbbc_reg_handle1, port_int_reg);

intr_handler_exit:

	mutex_exit(&softsp->sbbc_lock);

	return (DDI_INTR_CLAIMED);

}
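The XXXX comment in the handler above spells out the contract for registered softint handlers: mark themselves RUNNING under their lock before draining work and IDLE afterwards, so this dispatcher only calls ddi_trigger_softintr() on idle handlers. A hypothetical handler honoring that contract (my_softstate_t, my_intr_lock, my_intr_state and SBBC_INTR_RUNNING are illustrative names; only SBBC_INTR_IDLE appears above):

static uint_t
my_soft_intr(caddr_t arg)
{
	my_softstate_t	*ssp = (my_softstate_t *)arg;

	mutex_enter(&ssp->my_intr_lock);
	ssp->my_intr_state = SBBC_INTR_RUNNING;
	mutex_exit(&ssp->my_intr_lock);

	/* ... run until all pending work is exhausted ... */

	mutex_enter(&ssp->my_intr_lock);
	ssp->my_intr_state = SBBC_INTR_IDLE;
	mutex_exit(&ssp->my_intr_lock);

	return (DDI_INTR_CLAIMED);
}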
Code Example #20
/*
 * audioixp_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's devinfo
 *
 * Returns:
 *	DDI_SUCCESS		Registers successfully mapped
 *	DDI_FAILURE		Registers not successfully mapped
 */
static int
audioixp_alloc_port(audioixp_state_t *statep, int num)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	char			*prop;
	audio_dev_t		*adev;
	audioixp_port_t		*port;
	uint32_t		paddr;
	int			rc;
	dev_info_t		*dip;
	audioixp_bd_entry_t	*bdentry;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	port->statep = statep;
	port->started = B_FALSE;
	port->num = num;

	switch (num) {
	case IXP_REC:
		statep->rec_port = port;
		prop = "record-interrupts";
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
		port->nchan = 2;
		break;
	case IXP_PLAY:
		statep->play_port = port;
		prop = "play-interrupts";
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
		/* This could possibly be conditionalized */
		port->nchan = 6;
		break;
	default:
		audio_dev_warn(adev, "bad port number (%d)!", num);
		return (DDI_FAILURE);
	}

	port->intrs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, prop, IXP_INTS);

	/* make sure the values are good */
	if (port->intrs < IXP_MIN_INTS) {
		audio_dev_warn(adev, "%s too low, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	} else if (port->intrs > IXP_MAX_INTS) {
		audio_dev_warn(adev, "%s too high, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	}

	/*
	 * Figure out how much space we need.  Sample rate is 48kHz, and
	 * we need to store 8 chunks.  (Note that this means that low
	 * interrupt frequencies will require more RAM.)
	 */
	port->fragfr = 48000 / port->intrs;
	port->fragfr = IXP_ROUNDUP(port->fragfr, IXP_MOD_SIZE);
	port->fragsz = port->fragfr * port->nchan * 2;
	port->samp_size = port->fragsz * IXP_BD_NUMS;

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * Now allocate DMA memory for the buffer descriptor list.
	 * We allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * We allocate all buffer descriptor lists in contiguous DMA memory.
	 */
	port->bdl_size = sizeof (audioixp_bd_entry_t) * IXP_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	/*
	 * Wire up the BD list.
	 */
	paddr = port->samp_paddr;
	bdentry = (void *)port->bdl_kaddr;

	for (int i = 0; i < IXP_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, &bdentry->buf_base, paddr);
		ddi_put16(port->bdl_acch, &bdentry->status, 0);
		ddi_put16(port->bdl_acch, &bdentry->buf_len, port->fragsz / 4);
		ddi_put32(port->bdl_acch, &bdentry->next, port->bdl_paddr +
		    (((i + 1) % IXP_BD_NUMS) * sizeof (audioixp_bd_entry_t)));
		paddr += port->fragsz;
		bdentry++;
	}
	(void) ddi_dma_sync(port->bdl_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	port->engine = audio_engine_alloc(&audioixp_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Code Example #21
File: VirtioPci-solaris.c Project: OSLL/vboxhsm
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf,
                                              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                                              0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
                {
                    /*
                     * The excerpt ends here in the original listing; the
                     * error paths below are a reconstruction that unwinds
                     * the allocations in reverse order and fails the call.
                     */
                    LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: failed to bind DMA memory.\n"));
                }
                ddi_dma_mem_free(&pPciQueue->hIO);
            }
            ddi_dma_free_handle(&pPciQueue->hDMA);
        }
        RTMemFree(pPciQueue);
    }
    return NULL;
}
Code Example #22
/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd.  Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t	action;
	uint8_t				ir_active = FALSE, reply_type;
	uint8_t				function, found_reply = FALSE;
	uint16_t			SMID, action_type;
	mptsas_slots_t			*slots = mpt->m_active;
	int				config, vol;
	mptsas_cmd_t			*cmd;
	uint32_t			request_desc_low, reply_addr;
	int				cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	pMPI2DefaultReply_t		reply;
	pMpi2AddressReplyDescriptor_t	address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < slots->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (slots->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use.  Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);

	/*
	 * Even though reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming.  Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply.  If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action.  If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If reply frame is not in the proper range ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If not a RAID action reply ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}

		break;
	}

	/*
	 * clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}