/*
 * Translate a physical PCI address into the 1-based register-set index of
 * the BAR that contains it, storing the offset within that BAR in *off.
 *
 * Returns the register-set index (>= 1; entry 0 of "assigned-addresses"
 * is the config-space entry) on success, or -1 if the address is not
 * covered by any BAR or a DDI lookup fails.  'size' is unused (ARGSUSED).
 */
/*ARGSUSED*/
int
drm_get_pci_index_reg(dev_info_t *devi, uint_t physical, uint_t size,
    off_t *off)
{
	int length;
	pci_regspec_t *regs;
	int n_reg, i;
	int regnum;
	uint_t base, regsize;

	regnum = -1;

	if (ddi_dev_nregs(devi, &n_reg) == DDI_FAILURE) {
		DRM_ERROR("drm_get_pci_index_reg:ddi_dev_nregs failed\n");
		return (-1);
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&regs, &length) !=
	    DDI_PROP_SUCCESS) {
		DRM_ERROR("drm_get_pci_index_reg: ddi_getlongprop failed!\n");
		/*
		 * BUG FIX: the original jumped to an error label that called
		 * kmem_free() on 'regs' and 'length' while both were still
		 * uninitialized.  On property-lookup failure nothing was
		 * allocated, so there is nothing to free: just fail.
		 */
		return (-1);
	}

	/* Scan every assigned address range for one containing 'physical'. */
	for (i = 0; i < n_reg; i++) {
		base = (uint_t)regs[i].pci_phys_low;
		regsize = (uint_t)regs[i].pci_size_low;
		if ((uint_t)physical >= base &&
		    (uint_t)physical < (base + regsize)) {
			/* +1: regset 0 is the config-space entry */
			regnum = i + 1;
			*off = (off_t)(physical - base);
			break;
		}
	}

	/* ddi_getlongprop() allocated the property buffer; release it. */
	kmem_free(regs, (size_t)length);
	return (regnum);
}
/*
 * Map a physical PCI address to its 1-based register-set index and the
 * offset of the address within that register set.  Returns the index on
 * success, or -1 when the address lies outside every BAR or a DDI call
 * fails.  'size' is currently unused.
 */
int
drm_get_pci_index_reg(dev_info_t *dip, uint_t paddr, uint_t size, off_t *off)
{
	pci_regspec_t *rp = NULL;
	int proplen;
	uint_t lo, span;
	int total, idx;
	int result = -1;

	if (ddi_dev_nregs(dip, &total) == DDI_FAILURE) {
		DRM_ERROR("ddi_dev_nregs() failed");
		return (-1);
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&rp, &proplen) !=
	    DDI_PROP_SUCCESS) {
		DRM_ERROR("ddi_getlongprop() failed");
		if (rp)
			kmem_free(rp, (size_t)proplen);
		return (-1);
	}

	/* Walk the assigned address ranges looking for one holding paddr. */
	for (idx = 0; idx < total; idx++) {
		lo = (uint_t)rp[idx].pci_phys_low;
		span = (uint_t)rp[idx].pci_size_low;
		if ((uint_t)paddr >= lo && (uint_t)paddr < (lo + span)) {
			result = idx + 1;	/* entry 0 is config space */
			*off = (off_t)(paddr - lo);
			break;
		}
	}

	kmem_free(rp, (size_t)proplen);
	return (result);
}
/*
 * Bring the RMC comm instance online: map the SIO operating registers
 * (register set 0) if the node defines any, then reset the SIO hardware.
 * Returns 0 on success or EIO if the register mapping fails.
 */
static int
rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	ddi_acc_handle_t hdl;
	caddr_t addr;
	int nregs;

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
		nregs = 0;

	/*
	 * Regset 0 represents the SIO operating registers.  When the node
	 * defines no registers at all we succeed vacuously: commands will
	 * be accepted, but the accesses are faked.
	 */
	if (nregs != 0) {
		if (ddi_regs_map_setup(dip, 0, &addr, 0, 0,
		    rmc_comm_dev_acc_attr, &hdl) != DDI_SUCCESS)
			return (EIO);
		rcs->sd_state.sio_handle = hdl;
		rcs->sd_state.sio_regs = (void *)addr;
	}

	/*
	 * Now that the registers (if any) are mapped, we can initialise
	 * the SIO h/w.
	 */
	rmc_comm_hw_reset(rcs);
	return (0);
}
static int oce_map_regs(struct oce_dev *dev) { int ret = 0; off_t bar_size = 0; ASSERT(NULL != dev); ASSERT(NULL != dev->dip); /* get number of supported bars */ ret = ddi_dev_nregs(dev->dip, &dev->num_bars); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "%d: could not retrieve num_bars", MOD_CONFIG); return (DDI_FAILURE); } /* verify each bar and map it accordingly */ /* PCI CFG */ ret = ddi_dev_regsize(dev->dip, OCE_DEV_CFG_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not get sizeof BAR %d", OCE_DEV_CFG_BAR); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_DEV_CFG_BAR, &dev->dev_cfg_addr, 0, bar_size, ®_accattr, &dev->dev_cfg_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_DEV_CFG_BAR); return (DDI_FAILURE); } /* CSR */ ret = ddi_dev_regsize(dev->dip, OCE_PCI_CSR_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not get sizeof BAR %d", OCE_PCI_CSR_BAR); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_PCI_CSR_BAR, &dev->csr_addr, 0, bar_size, ®_accattr, &dev->csr_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_PCI_CSR_BAR); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } /* Doorbells */ ret = ddi_dev_regsize(dev->dip, OCE_PCI_DB_BAR, &bar_size); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "%d Could not get sizeof BAR %d", ret, OCE_PCI_DB_BAR); ddi_regs_map_free(&dev->csr_handle); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } ret = ddi_regs_map_setup(dev->dip, OCE_PCI_DB_BAR, &dev->db_addr, 0, 0, ®_accattr, &dev->db_handle); if (ret != DDI_SUCCESS) { oce_log(dev, CE_WARN, MOD_CONFIG, "Could not map bar %d", OCE_PCI_DB_BAR); ddi_regs_map_free(&dev->csr_handle); ddi_regs_map_free(&dev->dev_cfg_handle); return (DDI_FAILURE); } return (DDI_SUCCESS); }
/*ARGSUSED*/ static void fipe_ioat_alloc(void *arg) { int rc = 0, nregs; dev_info_t *dip; ddi_device_acc_attr_t attr; boolean_t fatal = B_FALSE; mutex_enter(&fipe_ioat_ctrl.ioat_lock); /* * fipe_ioat_alloc() is called in DEVICE ATTACH context when loaded. * In DEVICE ATTACH context, it can't call ddi_walk_devs(), so just * schedule a timer and exit. */ if (fipe_ioat_ctrl.ioat_try_alloc == B_FALSE) { fipe_ioat_ctrl.ioat_try_alloc = B_TRUE; goto out_error; } /* Check whether has been initialized or encountered permanent error. */ if (fipe_ioat_ctrl.ioat_ready || fipe_ioat_ctrl.ioat_failed || fipe_ioat_ctrl.ioat_cancel) { fipe_ioat_ctrl.ioat_timerid = 0; mutex_exit(&fipe_ioat_ctrl.ioat_lock); return; } if (fipe_ioat_ctrl.ioat_dev_info == NULL) { /* Find dev_info_t for IOAT engine. */ ddi_walk_devs(ddi_root_node(), fipe_search_ioat_dev, NULL); if (fipe_ioat_ctrl.ioat_dev_info == NULL) { cmn_err(CE_NOTE, "!fipe: no IOAT hardware found, disable pm."); fatal = B_TRUE; goto out_error; } } /* Map in IOAT control register window. */ ASSERT(fipe_ioat_ctrl.ioat_dev_info != NULL); ASSERT(fipe_ioat_ctrl.ioat_reg_mapped == B_FALSE); dip = fipe_ioat_ctrl.ioat_dev_info; if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS || nregs < 2) { cmn_err(CE_WARN, "!fipe: ioat has not enough register bars."); fatal = B_TRUE; goto out_error; } attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; rc = ddi_regs_map_setup(dip, 1, (caddr_t *)&fipe_ioat_ctrl.ioat_reg_addr, 0, 0, &attr, &fipe_ioat_ctrl.ioat_reg_handle); if (rc != DDI_SUCCESS) { cmn_err(CE_WARN, "!fipe: failed to map IOAT registeres."); fatal = B_TRUE; goto out_error; } /* Mark IOAT status. 
*/ fipe_ioat_ctrl.ioat_reg_mapped = B_TRUE; fipe_ioat_ctrl.ioat_ready = B_TRUE; fipe_ioat_ctrl.ioat_failed = B_FALSE; fipe_ioat_ctrl.ioat_timerid = 0; mutex_exit(&fipe_ioat_ctrl.ioat_lock); return; out_error: fipe_ioat_ctrl.ioat_timerid = 0; if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) { if (fatal) { /* Mark permanent error and give up. */ fipe_ioat_ctrl.ioat_failed = B_TRUE; /* Release reference count hold by ddi_find_devinfo. */ if (fipe_ioat_ctrl.ioat_dev_info != NULL) { ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info); fipe_ioat_ctrl.ioat_dev_info = NULL; } } else { /* * Schedule another timer to keep on trying. * timeout() should always succeed, no need to check * return. */ fipe_ioat_ctrl.ioat_timerid = timeout(fipe_ioat_alloc, NULL, drv_usectohz(FIPE_IOAT_RETRY_INTERVAL)); } } mutex_exit(&fipe_ioat_ctrl.ioat_lock); }
/**
 * At attach time, we allocate the soft state structure for the current
 * instance of the device.
 *
 * Validates that the device looks like a Quantis QRNG card (register
 * count and register-file size), allocates per-instance soft state,
 * creates the minor node, maps the register window and initialises the
 * hardware.  Each error path unwinds everything acquired before it.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
quantis_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	quantis_soft_state_t *soft_state;
	ddi_device_acc_attr_t dev_acc_attr; /* Hold the device access attributes. */
	int nregs;
	off_t regsize;
	char msg[MAX_MSG_LEN];

	LOG_DEBUG0("attach\n");
	switch (cmd) {
	case DDI_ATTACH:
		instance = ddi_get_instance(dip);
		snprintf(msg, MAX_MSG_LEN,
		    "Attaching the Quantis device %d.\n", instance);
		LOG_DEBUG0(msg);

		/*
		 * PCI devices are self-identifying devices, so we check that we
		 * indeed have a Quantis QRNG card by checking that we have one
		 * register page with the correct size.
		 */
		if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not get the number of register for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		if (nregs < 4) {
			snprintf(msg, MAX_MSG_LEN,
			    "The Quantis device %d has %d PCI base registers, but should have at least 4.\n",
			    instance, nregs);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		if (ddi_dev_regsize(dip, QUANTIS_REG_IDX, &regsize)
		    != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not get the register size for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		if (regsize < (int)QUANTIS_REG_LENGTH) {
			/* BUG FIX: corrected "Quantice" typo in the message. */
			snprintf(msg, MAX_MSG_LEN,
			    "The size of the Quantis device (%d) registers file is %d bytes long, "
			    "but should be at least %u bytes long.\n",
			    instance, (int)regsize,
			    (unsigned int)QUANTIS_REG_LENGTH);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}

		LOG_DEBUG0("After test of the validity of the card, before soft state alloc.\n");
		if (ddi_soft_state_zalloc(quantis_soft_state_p, instance)
		    != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not allocate soft state structure for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			return DDI_FAILURE;
		}
		soft_state = (quantis_soft_state_t *)
		    ddi_get_soft_state(quantis_soft_state_p, instance);
		soft_state->dip = dip;
		ddi_set_driver_private(dip, (caddr_t)soft_state);
		soft_state->cnt = 0;

		/*
		 * Initialize the mutex in the soft state. We have no interrupt,
		 * so we can set `arg' to `NULL'
		 */
		mutex_init(&soft_state->mutex, NULL, MUTEX_DRIVER, NULL);

		if (ddi_create_minor_node(dip, ddi_get_name(dip), S_IFCHR,
		    instance, DDI_PSEUDO, 0) == DDI_FAILURE) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not create minor node for the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			mutex_destroy(&soft_state->mutex);
			ddi_soft_state_free(quantis_soft_state_p, instance);
			return DDI_FAILURE;
		}
		LOG_DEBUG1("ddi_get_name %s\n", ddi_get_name(dip));

		dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
		dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
		dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
		if (ddi_regs_map_setup(dip, QUANTIS_REG_IDX,
		    (caddr_t *)&soft_state->regs, 0, QUANTIS_REG_LENGTH,
		    &dev_acc_attr, &soft_state->regs_handle) != DDI_SUCCESS) {
			snprintf(msg, MAX_MSG_LEN,
			    "Could not map the registers space of the Quantis device %d.\n",
			    instance);
			QUANTIS_ERROR(msg);
			/*
			 * BUG FIX: the minor node created above was leaked on
			 * this error path; remove it before freeing state.
			 */
			ddi_remove_minor_node(dip, NULL);
			mutex_destroy(&soft_state->mutex);
			ddi_soft_state_free(quantis_soft_state_p, instance);
			return DDI_FAILURE;
		}

		mutex_enter(&quantis_mutex);
		card_count++;
		mutex_exit(&quantis_mutex);

		LOG_DEBUG0("Just before mutex\n");
		mutex_enter(&soft_state->mutex);
		LOG_DEBUG0("Just before rng_reset.\n");
		quantis_rng_reset(soft_state);
		LOG_DEBUG0("Just before enable_modules.\n");
		quantis_rng_enable_modules(soft_state,
		    quantis_rng_modules_mask(soft_state));
		LOG_DEBUG0("Just before release mutex.\n");
		mutex_exit(&soft_state->mutex);

		snprintf(msg, MAX_MSG_LEN,
		    "Successfully attached the Quantis device %d. Currently, %d Quantis cards are available.\n",
		    instance, card_count);
		QUANTIS_INFO(msg);
# ifdef DEBUG
		ddi_report_dev(dip);
# endif
		return DDI_SUCCESS;

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		return DDI_SUCCESS;

	default:
		return DDI_FAILURE;
	}
}