static link_state_t
virtionet_link_status(virtionet_state_t *sp)
{
	link_state_t	link;

	/*
	 * If the status field is supported read the link status there,
	 * otherwise link state "should be assumed active".
	 */
	if (sp->features & VIRTIO_NET_F_STATUS) {
		uint16_t	status;

		status = ddi_get16(sp->devhandle,
		    (uint16_t *)(sp->devaddr + VIRTIO_NET_CFG_STATUS));
		if (status & VIRTIO_NET_S_LINK_UP) {
			link = LINK_STATE_UP;
		} else {
			link = LINK_STATE_DOWN;
		}
	} else {
		link = LINK_STATE_UP;
	}
	return (link);
}
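/*
 * Hypothetical usage sketch (not part of the driver above): propagating the
 * polled link state to the GLDv3 framework with mac_link_update(9F).  The
 * "mh" mac handle field in virtionet_state_t and the poll routine itself are
 * assumptions introduced only for illustration; mac_link_update() and its
 * signature are standard DDI.
 */
#include <sys/mac_provider.h>

static void
virtionet_link_poll(virtionet_state_t *sp)
{
	link_state_t link = virtionet_link_status(sp);

	/* Report the current link state to the MAC layer (sp->mh assumed). */
	mac_link_update(sp->mh, link);
}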
/*ARGSUSED*/
uint16_t
isadma_get16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	ddi_acc_handle_t phdl = hdlp->ahi_common.ah_platform_private;
	isadma_devstate_t *isadmap = hdlp->ahi_common.ah_bus_private;
	off_t offset = (caddr_t)addr - hdlp->ahi_common.ah_addr;
	uint16_t ret = 0xffff;

	if (IN_CHILD_SPACE(offset)) {	/* Pass to parent */
#ifdef DEBUG
		isadma_punt++;
#endif
		return (ddi_get16(phdl, addr));
	}
#ifdef DEBUG
	isadma_check_waiters(isadmap);
#endif
	mutex_enter(&isadmap->isadma_access_lock);
	isadma_dmawait(isadmap);	/* wait until on-going dma completes */

	/* Only Allow access to the 16 bit count and address registers */
	if (!IN_16BIT_SPACE(offset))
		goto exit;

	/* Set the sequencing register to the low byte */
	ddi_put8(phdl, (uint8_t *)HDL_TO_SEQREG_ADDR(hdlp, offset), 0);

	/* Read the low byte, then high byte */
	ret = ddi_get8(phdl, (uint8_t *)addr);
	ret = (ddi_get8(phdl, (uint8_t *)addr) << 8) | ret;
exit:
	isadma_wakeup(isadmap);
	mutex_exit(&isadmap->isadma_access_lock);
	return (ret);
}
static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}
static int
mptsas_raidphydsk_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidPhysDiskPage0_t	diskpage;
	int				rval = DDI_SUCCESS;
	uint16_t			*devhdl;
	uint8_t				*state;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidphydsk_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	devhdl = va_arg(ap, uint16_t *);
	state = va_arg(ap, uint8_t *);
	diskpage = (pMpi2RaidPhysDiskPage0_t)page_memp;
	*devhdl = ddi_get16(accessp, &diskpage->DevHandle);
	*state = ddi_get8(accessp, &diskpage->PhysDiskState);
	return (rval);
}
uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}
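/*
 * Illustrative usage sketch for the accessor above: reading the 16-bit
 * virtio-net status field from device-specific config space.  The offset
 * (6, immediately after the 6-byte MAC address) and the VIRTIO_NET_S_LINK_UP
 * bit come from the virtio specification; the macro names and the wrapper
 * function are assumptions introduced here, not taken from the driver source.
 */
#define	EXAMPLE_VIRTIO_NET_CONFIG_STATUS	6	/* offset in device config */
#define	EXAMPLE_VIRTIO_NET_S_LINK_UP		0x1

static boolean_t
example_virtio_net_link_up(struct virtio_softc *sc)
{
	uint16_t status;

	status = virtio_read_device_config_2(sc,
	    EXAMPLE_VIRTIO_NET_CONFIG_STATUS);
	return ((status & EXAMPLE_VIRTIO_NET_S_LINK_UP) ? B_TRUE : B_FALSE);
}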
static uint16_t
rge_reg_get16(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
	    (void *)rgep, regno));

	return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
}
uint16_t
pci_config_get16(ddi_acc_handle_t handle, off_t offset)
{
	caddr_t	cfgaddr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(handle);
	cfgaddr = hp->ah_addr + offset;
	return (ddi_get16(handle, (uint16_t *)cfgaddr));
}
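/*
 * Minimal usage sketch for the config-space accessor above: reading the PCI
 * vendor and device IDs during attach.  The function name and its arguments
 * are illustrative; pci_config_setup(9F), pci_config_get16(9F),
 * pci_config_teardown(9F) and the PCI_CONF_VENID/PCI_CONF_DEVID offsets are
 * standard DDI/PCI definitions.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/pci.h>

static int
example_read_pci_ids(dev_info_t *dip, uint16_t *venid, uint16_t *devid)
{
	ddi_acc_handle_t cfg_handle;

	if (pci_config_setup(dip, &cfg_handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	*venid = pci_config_get16(cfg_handle, PCI_CONF_VENID);
	*devid = pci_config_get16(cfg_handle, PCI_CONF_DEVID);

	pci_config_teardown(&cfg_handle);
	return (DDI_SUCCESS);
}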
static int
fipe_ioat_trigger(void)
{
	uint16_t ctrl;
	uint32_t err;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/* Check channel in use flag. */
	ctrl = ddi_get16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL));
	if (ctrl & 0x100) {
		/*
		 * Channel is in use by somebody else. IOAT driver may have
		 * been loaded, forbid fipe from accessing IOAT hardware
		 * anymore.
		 */
		fipe_ioat_ctrl.ioat_ready = B_FALSE;
		fipe_ioat_ctrl.ioat_failed = B_TRUE;
		FIPE_KSTAT_INC(ioat_start_fail_cnt);
		return (-1);
	} else {
		/* Set channel in use flag. */
		ddi_put16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL),
		    0x100);
	}

	/* Write command address. */
	ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_LO),
	    (uint32_t)fipe_ioat_ctrl.ioat_cmd_physaddr);
	ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_HI),
	    (uint32_t)(fipe_ioat_ctrl.ioat_cmd_physaddr >> 32));

	/* Check and clear error flags. */
	err = ddi_get32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR));
	if (err != 0) {
		ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR),
		    err);
	}

	/* Start channel. */
	ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x1);

	return (0);
}
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
static int
virtio_enable_msi(struct virtio_softc *sc)
{
	int ret, i;
	int vq_handler_count = sc->sc_intr_num;

	/* Number of handlers, not counting the config. */
	if (sc->sc_intr_config)
		vq_handler_count--;

	/* Enable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_enable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to enable MSI, falling back to INTx");
			goto out_enable;
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to enable MSI %d, "
				    "falling back to INTx", i);

				while (--i >= 0) {
					(void) ddi_intr_disable(
					    sc->sc_intr_htable[i]);
				}
				goto out_enable;
			}
		}
	}

	/* Bind the allocated MSI to the queues and config */
	for (i = 0; i < vq_handler_count; i++) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for VQ %d, MSI %d. Check = %x", i, i, check);
			ret = ENODEV;
			goto out_bind;
		}
	}

	if (sc->sc_intr_config) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for Config updates, MSI %d", i);
			ret = ENODEV;
			goto out_bind;
		}
	}

	return (DDI_SUCCESS);

out_bind:
	/* Unbind the vqs */
	for (i = 0; i < vq_handler_count - 1; i++) {
		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), VIRTIO_MSI_NO_VECTOR);
	}
	/* And the config */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

	ret = DDI_FAILURE;

out_enable:
	return (ret);
}
uint16_t
ipw2200_csr_get16(struct ipw2200_softc *sc, uint32_t off)
{
	return (ddi_get16(sc->sc_ioh,
	    (uint16_t *)((uintptr_t)sc->sc_regs + off)));
}
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP,
                                      0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue,
                                              pPciQueue->cbBuf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
                                              DDI_DMA_SLEEP, 0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n",
                             pQueue->QueueIndex, pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n",
                            pQueue->QueueIndex, pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN),
                              pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t	raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t	element;
	uint32_t	*confignum;
	int		rval = DDI_SUCCESS, i;
	uint8_t		numelements, vol, disk;
	uint16_t	elementtype, voldevhandle;
	uint16_t	etype_vol, etype_pd, etype_hs;
	uint16_t	etype_oce;
	mptsas_slots_t	*slots = mpt->m_active;
	m_raidconfig_t	*raidconfig;
	uint64_t	raidwwn;
	uint32_t	native;
	mptsas_target_t	*ptgt;
	uint32_t	configindex;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap, uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum = ddi_get8(accessp, &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &slots->m_raidconfig[configindex];
	slots->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type.  Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume.  This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(&slots->m_tgttbl,
			    voldevhandle, raidwwn, 0, 0, 0, mpt);

			raidconfig->m_raidvol[vol].m_raidtgt = ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config.  These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] =
			    ddi_get16(accessp, &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}
/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd. Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t	action;
	uint8_t				ir_active = FALSE, reply_type;
	uint8_t				function, found_reply = FALSE;
	uint16_t			SMID, action_type;
	mptsas_slots_t			*slots = mpt->m_active;
	int				config, vol;
	mptsas_cmd_t			*cmd;
	uint32_t			request_desc_low, reply_addr;
	int				cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	pMPI2DefaultReply_t		reply;
	pMpi2AddressReplyDescriptor_t	address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < slots->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (slots->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use. Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);

	/*
	 * Even though reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming.  Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply.  If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action.  If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If reply frame is not in the proper range ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If not a RAID action reply ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}
		break;
	}

	/*
	 * clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}