/*
 * Attach the virtual-device nexus: initialize the interrupt and device
 * memory resource managers, then walk the machine description's
 * "virtual-device" nodes and add a child device for each.
 *
 * Returns 0 on success, ENXIO if the machine description is unavailable.
 * Panics on OFW/rman setup failure (boot-critical).
 */
static int
vnex_attach(device_t dev)
{
	struct vnex_devinfo *vndi;
	struct vnex_softc *sc;
	device_t cdev;
	phandle_t node;
	mde_cookie_t rootnode, *listp = NULL;
	int i, listsz, num_nodes, num_devices;
	md_t *mdp;

	node = ofw_bus_get_node(dev);
	if (node == -1)
		panic("%s: ofw_bus_get_node failed.", __func__);

	sc = device_get_softc(dev);

	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "Interrupts";
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "Device Memory";

	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, IV_MAX - 1) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0ULL, ~0ULL) != 0)
		panic("%s: failed to set up rmans.", __func__);

	if ((mdp = md_get()) == NULL)
		return (ENXIO);

	num_nodes = md_node_count(mdp);
	listsz = num_nodes * sizeof(mde_cookie_t);
	listp = (mde_cookie_t *)malloc(listsz, M_DEVBUF, M_WAITOK);
	rootnode = md_root_node(mdp);

	/*
	 * scan the machine description for virtual devices
	 */
	num_devices = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "fwd"), listp);

	for (i = 0; i < num_devices; i++) {
		if ((vndi = vnex_setup_dinfo(dev, listp[i])) == NULL)
			continue;
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    vndi->vndi_mbdinfo.mbd_name);
			vnex_destroy_dinfo(vndi);
			continue;
		}
		device_set_ivars(cdev, vndi);
	}

	bus_generic_attach(dev);

	free(listp, M_DEVBUF);
	/*
	 * BUG FIX: release the MD reference taken by md_get() above.
	 * It was previously never dropped (compare md_vdev_find_node(),
	 * which pairs md_get() with md_put()).
	 */
	md_put(mdp);

	return (0);
}
/*
 * DR event handler
 *
 * Responds to PICLEVENT_DR_AP_STATE_CHANGE events: unpacks the event
 * nvlist, validates its payload type, and adds or removes devices for
 * the named attachment point based on the insert/remove hint.  Finally
 * pokes the devtree plugin so it can augment CPU properties.
 */
static void
dr_handler(const char *ename, const void *earg, size_t size, void *cookie)
{
	nvlist_t	*nvlp = NULL;
	char		*dtype;
	char		*ap_id;
	char		*hint;

	if (strcmp(ename, PICLEVENT_DR_AP_STATE_CHANGE) != 0)
		return;

	if (nvlist_unpack((char *)earg, size, &nvlp, NULL) != 0)
		return;

	/* Only events carrying PICL-formatted data are understood. */
	if (nvlist_lookup_string(nvlp, PICLEVENTARG_DATA_TYPE, &dtype) != 0 ||
	    strcmp(dtype, PICLEVENTARG_PICLEVENT_DATA) != 0)
		goto cleanup;

	/* Both the attachment point and the DR hint must be present. */
	if (nvlist_lookup_string(nvlp, PICLEVENTARG_AP_ID, &ap_id) != 0 ||
	    nvlist_lookup_string(nvlp, PICLEVENTARG_HINT, &hint) != 0)
		goto cleanup;

	if ((mdp = mdesc_devinit()) == NULL)
		goto cleanup;

	rootnode = md_root_node(mdp);

	if (strcmp(hint, DR_HINT_INSERT) == 0)
		(void) update_devices(ap_id, DEV_ADD);
	else if (strcmp(hint, DR_HINT_REMOVE) == 0)
		(void) update_devices(ap_id, DEV_REMOVE);

	mdesc_devfini(mdp);
	nvlist_free(nvlp);

	/*
	 * Signal the devtree plugin to add more cpu properties.
	 */
	signal_devtree();
	return;

cleanup:
	nvlist_free(nvlp);
}
void mdescplugin_init(void) { int status; status = ptree_get_root(&root_node); if (status != PICL_SUCCESS) { return; } mdp = mdesc_devinit(); if (mdp == NULL) return; /* * update the cpu configuration in case the snapshot cache used by the * devtree plugin is out of date. */ (void) update_devices(OBP_CPU, DEV_ADD); (void) update_devices(OBP_CPU, DEV_REMOVE); rootnode = md_root_node(mdp); /* * This is the start of the CPU property augmentation code. * add_cpu_prop and the rest of the CPU code lives in cpu_prop_update.c */ status = ptree_walk_tree_by_class(root_node, "cpu", NULL, add_cpu_prop); if (status != PICL_SUCCESS) { return; } signal_devtree(); (void) disk_discovery(); /* * register dsc_handler for both "sysevent-device-added" and * and for "sysevent-device-removed" PICL events */ (void) ptree_register_handler(PICLEVENT_SYSEVENT_DEVICE_ADDED, dsc_handler, NULL); (void) ptree_register_handler(PICLEVENT_SYSEVENT_DEVICE_REMOVED, dsc_handler, NULL); (void) ptree_register_handler(PICLEVENT_DR_AP_STATE_CHANGE, dr_handler, NULL); mdesc_devfini(mdp); }
/*
 * Find the machine description node for virtual device "dev" by matching
 * its bus cfg-handle against the "cfg-handle" property of each
 * "virtual-device" MD node.
 *
 * On success, stores the node cookie in *valp and returns 0.
 * Returns ENXIO if the MD is unavailable, ENOENT if no node matches
 * (previously this case erroneously returned 0 with *valp unset).
 */
int
md_vdev_find_node(device_t dev, mde_cookie_t *valp)
{
	uint64_t cfg_handle;
	mde_cookie_t rootnode, node, *listp = NULL;
	int i, listsz, num_nodes, num_devices, error;
	md_t *mdp;

	cfg_handle = mdesc_bus_get_handle(dev);

	if ((mdp = md_get()) == NULL)
		return (ENXIO);

	num_nodes = md_node_count(mdp);
	listsz = num_nodes * sizeof(mde_cookie_t);
	listp = (mde_cookie_t *)malloc(listsz, M_DEVBUF, M_WAITOK);

	rootnode = md_root_node(mdp);

	/*
	 * BUG FIX: assume failure until a match is found.  The original
	 * code did "node = error = 0" after setting error = EINVAL, so a
	 * scan that matched nothing returned success with *valp unset.
	 */
	error = ENOENT;

	num_devices = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "fwd"), listp);

	if (num_devices == 0)
		goto done;

	for (i = 0; i < num_devices; i++) {
		uint64_t thandle;

		node = listp[i];
		/*
		 * BUG FIX: skip nodes without a "cfg-handle" property
		 * instead of comparing an uninitialized thandle.
		 */
		if (md_get_prop_val(mdp, node, "cfg-handle", &thandle) != 0)
			continue;
		if (thandle == cfg_handle) {
			*valp = node;
			error = 0;
			break;
		}
	}

done:
	/* BUG FIX: listp was previously leaked on every call. */
	free(listp, M_DEVBUF);
	md_put(mdp);

	return (error);
}
/*
 * Determine the firmware's domaining capabilities by inspecting the
 * "domaining-enabled" property on the MD platform node, and record the
 * result in the global domaining_capabilities word.
 */
static void
init_domaining_capabilities(md_t *mdp, mde_cookie_t *listp)
{
	mde_cookie_t	md_root;
	int		nfound;
	uint64_t	enabled = 0;

	md_root = md_root_node(mdp);
	ASSERT(md_root != MDE_INVAL_ELEM_COOKIE);

	nfound = md_scan_dag(mdp, md_root, md_find_name(mdp, "platform"),
	    md_find_name(mdp, "fwd"), listp);

	/* should only be one platform node */
	ASSERT(nfound == 1);

	if (md_get_prop_val(mdp, *listp, "domaining-enabled", &enabled) != 0) {
		/*
		 * The property is not present. This implies
		 * that the firmware does not support domaining
		 * features.
		 */
		MDP(("'domaining-enabled' property not present\n"));
		domaining_capabilities = 0;
		return;
	}

	domaining_capabilities = DOMAINING_SUPPORTED;

	if (enabled == 1) {
		/* Honor a manual override before enabling domaining. */
		if (force_domaining_disabled)
			MDP(("domaining manually disabled\n"));
		else
			domaining_capabilities |= DOMAINING_ENABLED;
	}

	MDP(("domaining_capabilities= 0x%x\n", domaining_capabilities));
}
/* * Routine used to setup a newly inserted CPU in preparation for starting * it running code. */ int mp_cpu_configure(int cpuid) { md_t *mdp; mde_cookie_t rootnode, cpunode = MDE_INVAL_ELEM_COOKIE; int listsz, i; mde_cookie_t *listp = NULL; int num_nodes; uint64_t cpuid_prop; cpu_t *cpu; processorid_t id; ASSERT(MUTEX_HELD(&cpu_lock)); if ((mdp = md_get_handle()) == NULL) return (ENODEV); rootnode = md_root_node(mdp); ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE); num_nodes = md_node_count(mdp); ASSERT(num_nodes > 0); listsz = num_nodes * sizeof (mde_cookie_t); listp = kmem_zalloc(listsz, KM_SLEEP); num_nodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "cpu"), md_find_name(mdp, "fwd"), listp); if (num_nodes < 0) return (ENODEV); for (i = 0; i < num_nodes; i++) { if (md_get_prop_val(mdp, listp[i], "id", &cpuid_prop)) break; if (cpuid_prop == (uint64_t)cpuid) { cpunode = listp[i]; break; } } if (cpunode == MDE_INVAL_ELEM_COOKIE) return (ENODEV); kmem_free(listp, listsz); mpo_cpu_add(mdp, cpuid); /* * Note: uses cpu_lock to protect cpunodes * which will be modified inside of fill_cpu and * setup_exec_unit_mappings. */ fill_cpu(mdp, cpunode); /* * Adding a CPU may cause the execution unit sharing * relationships to change. Update the mappings in * the cpunode structures. */ setup_chip_mappings(mdp); setup_exec_unit_mappings(mdp); /* propagate the updated mappings to the CPU structures */ for (id = 0; id < NCPU; id++) { if ((cpu = cpu_get(id)) == NULL) continue; cpu_map_exec_units(cpu); } (void) md_fini_handle(mdp); if ((i = setup_cpu_common(cpuid)) != 0) { (void) cleanup_cpu_common(cpuid); return (i); } return (0); }
/*
 * Exported interface to register a LDC endpoint with
 * the channel nexus
 *
 * Looks up the channel's Tx/Rx interrupt inos in the machine
 * description, allocates and initializes a cnex_ldc_t, and links it
 * onto the nexus channel list.  Returns 0 on success, EINVAL if the
 * channel is already registered or no MD endpoint matches, ENXIO on
 * MD access failure.
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int idx;
	cnex_ldc_t *cldcp;
	cnex_ldc_t *new_cldcp;
	int listsz, num_nodes, num_channels;
	md_t *mdp = NULL;
	mde_cookie_t rootnode, *listp = NULL;
	uint64_t tmp_id;
	/* (uint64_t)-1 sentinels: stay -1 iff no MD endpoint matched "id" */
	uint64_t rxino = (uint64_t)-1;
	uint64_t txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int status, instance;
	dev_info_t *chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	/*
	 * Drop the lock while consulting the MD; the list is re-checked
	 * under the lock before insertion below.
	 */
	mutex_exit(&cnex_ssp->clist_lock);

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel_endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {
		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	new_cldcp = kmem_zalloc(sizeof (*new_cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&new_cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	new_cldcp->id = id;
	new_cldcp->tx.ino = txino;
	new_cldcp->rx.ino = rxino;
	new_cldcp->devclass = devclass;
	new_cldcp->tx.weight = CNEX_TX_INTR_WEIGHT;
	new_cldcp->rx.weight = cnex_class_weight(devclass);
	new_cldcp->dip = chan_dip;

	/*
	 * Add channel to nexus channel list.
	 * Check again to see if channel is already registered since
	 * clist_lock was dropped above.
	 */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			/* Lost the race: discard the structure we built. */
			mutex_destroy(&new_cldcp->lock);
			kmem_free(new_cldcp, sizeof (*new_cldcp));
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	/* Insert at the head of the channel list. */
	new_cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = new_cldcp;
	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}