/*
 * Associate a default device descriptor with a hwgraph vertex, or remove
 * the existing association when new_device_desc is NULL.
 *
 * The whole implementation is currently compiled out (#ifdef LATER); the
 * only live statement is the FIXME marker at the bottom.
 */
void
device_desc_default_set(devfs_handle_t dev, device_desc_t new_device_desc)
{
#ifdef LATER
	graph_error_t rc;
	device_desc_t old_device_desc = NULL;

	if (new_device_desc) {
		/* mark the descriptor as associated before publishing it */
		new_device_desc->flags |= D_IS_ASSOC;
		rc = hwgraph_info_add_LBL(dev,
					  INFO_LBL_DEVICE_DESC,
					  (arbitrary_info_t)new_device_desc);
		if (rc == GRAPH_DUP) {
			/* a descriptor is already attached -- swap it out,
			 * capturing the old one so it can be released below */
			rc = hwgraph_info_replace_LBL(dev,
					INFO_LBL_DEVICE_DESC,
					(arbitrary_info_t)new_device_desc,
					(arbitrary_info_t *)&old_device_desc);
			ASSERT(rc == GRAPH_SUCCESS);
		}
		/* make the label reachable from user level via attr_get() */
		hwgraph_info_export_LBL(dev, INFO_LBL_DEVICE_DESC,
					sizeof(struct device_desc_s));
	} else {
		/* NULL descriptor: detach any existing one */
		rc = hwgraph_info_remove_LBL(dev,
					     INFO_LBL_DEVICE_DESC,
					     (arbitrary_info_t *)&old_device_desc);
	}

	/* release the descriptor we displaced (if any) */
	if (old_device_desc) {
		ASSERT(old_device_desc->flags & D_IS_ASSOC);
		old_device_desc->flags &= ~D_IS_ASSOC;
		device_desc_free(old_device_desc);
	}
#endif
	FIXME("device_desc_default_set");
}
/*
 * copy xwidget_info_t from conn_v to peer_conn_v
 *
 * Builds a fresh xwidget_info for the peer connection vertex, copying the
 * identifying fields (widget id, hardware id) from conn_v's info, pointing
 * the master fields at the peer hub, and attaching the result to
 * peer_conn_v under INFO_LBL_XWIDGET.
 *
 * Returns 1 on success; 0 when the peer hub info or the source's
 * INFO_LBL_XWIDGET label is missing, or the label cannot be added to the
 * peer; -ENOMEM on allocation failure.
 *
 * NOTE(review): -ENOMEM is nonzero, so callers that test "nonzero ==
 * success" would misinterpret the allocation-failure path -- confirm
 * call sites only treat 1 as success.
 */
static int
pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
					cnodeid_t xbow_peer, char *peer_path)
{
	xwidget_info_t widget_info, peer_widget_info;
	vertex_hdl_t peer_hubv;
	hubinfo_t peer_hub_info;

	/* get the peer hub's widgetid */
	peer_hubv = NODEPDA(xbow_peer)->node_vertex;
	peer_hub_info = NULL;
	hubinfo_get(peer_hubv, &peer_hub_info);
	if (peer_hub_info == NULL)
		return 0;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
		peer_widget_info = kmalloc(sizeof (*(peer_widget_info)), GFP_KERNEL);
		if ( !peer_widget_info ) {
			return -ENOMEM;
		}
		memset(peer_widget_info, 0, sizeof (*(peer_widget_info)));

		peer_widget_info->w_fingerprint = widget_info_fingerprint;
		peer_widget_info->w_vertex = peer_conn_v;
		peer_widget_info->w_id = widget_info->w_id;
		/* master of the duplicate is the *peer* hub, not conn_v's */
		peer_widget_info->w_master = peer_hubv;
		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;

		/* structure copy */
		peer_widget_info->w_hwid = widget_info->w_hwid;
		peer_widget_info->w_efunc = 0;
		peer_widget_info->w_einfo = 0;

		/* duplicate gets its own copy of the peer's path string */
		peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1, GFP_KERNEL);
		if (!peer_widget_info->w_name) {
			kfree(peer_widget_info);
			return -ENOMEM;
		}
		strcpy(peer_widget_info->w_name, peer_path);

		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t)peer_widget_info) != GRAPH_SUCCESS) {
			/* roll back both allocations on label-add failure */
			kfree(peer_widget_info->w_name);
			kfree(peer_widget_info);
			return 0;
		}
		xwidget_info_set(peer_conn_v, peer_widget_info);
		return 1;
	}
	printk("pic_bus1_widget_info_dup: "
			"cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
	return 0;
}
/*
 * Attach a zeroed xswitch_vol_s "volunteer" bookkeeping structure to a
 * crosstalk switch vertex under the INFO_LBL_XSWITCH_VOL label.
 *
 * Fix: the result of snia_kmem_zalloc() was dereferenced without a NULL
 * check; on allocation failure we now warn and leave the vertex without
 * volunteer info instead of oopsing.
 */
void
xswitch_vertex_init(vertex_hdl_t xswitch)
{
	xswitch_vol_t xvolinfo;
	int rc;

	xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s));
	if (xvolinfo == NULL) {
		printk(KERN_WARNING "xswitch_vertex_init(): Unable to "
			"allocate memory\n");
		return;
	}
	init_MUTEX(&xvolinfo->xswitch_volunteer_mutex);
	rc = hwgraph_info_add_LBL(xswitch, INFO_LBL_XSWITCH_VOL,
			(arbitrary_info_t)xvolinfo);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = rc;	/* silence "set but unused" when ASSERT compiles away */
}
/*
 * Attach per-switch "volunteer" bookkeeping to a crosstalk switch vertex
 * under the INFO_LBL_XSWITCH_VOL label.
 *
 * Fixes: kmalloc() was dereferenced without a NULL check, and only the
 * volunteer count was explicitly initialized, leaving the rest of the
 * structure as uninitialized heap memory; it is now zeroed before use.
 */
void
xswitch_vertex_init(devfs_handle_t xswitch)
{
	xswitch_vol_t xvolinfo;
	int rc;

	xvolinfo = kmalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
	if (!xvolinfo) {
		printk(KERN_WARNING "xswitch_vertex_init(): Unable to "
			"allocate memory\n");
		return;
	}
	memset(xvolinfo, 0, sizeof(struct xswitch_vol_s));

	mutex_init(&xvolinfo->xswitch_volunteer_mutex);
	xvolinfo->xswitch_volunteer_count = 0;	/* explicit; memset covers it too */

	rc = hwgraph_info_add_LBL(xswitch, INFO_LBL_XSWITCH_VOL,
			(arbitrary_info_t)xvolinfo);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = rc;	/* silence "set but unused" when ASSERT compiles away */
}
/*
 * Record a hint bitmask describing the subdevices present behind the
 * device in a given PCI slot.  The mask is stored under INFO_LBL_SUBDEVS
 * on the slot's "pci/<slot>" vertex; the holder is allocated on first
 * use, with a re-read after the add to cope with a concurrent adder.
 */
void
pcibr_hints_subdevs(devfs_handle_t xconn_vhdl, pciio_slot_t slot,
		    uint64_t subdevs)
{
	arbitrary_info_t ainfo = 0;
	char sdname[16];
	devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;

	sprintf(sdname, "pci/%d", slot);
	(void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
	if (pconn_vhdl == GRAPH_VERTEX_NONE) {
#if DEBUG
		printk("pcibr_hints_subdevs: hwgraph_path_create failed at\n"
			"\t%p (seeking %s)\n", xconn_vhdl, sdname);
#endif
		return;
	}
	hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
	if (ainfo == 0) {
		uint64_t *subdevp;

		NEW(subdevp);
		if (!subdevp) {
#if DEBUG
			printk("pcibr_hints_subdevs: subdev ptr alloc failed at\n"
				"\t%p\n", pconn_vhdl);
#endif
			return;
		}
		*subdevp = subdevs;
		hwgraph_info_add_LBL(pconn_vhdl, INFO_LBL_SUBDEVS,
				     (arbitrary_info_t) subdevp);
		/* re-read: a concurrent caller may have installed its own
		 * holder first, making our add a duplicate */
		hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
		if (ainfo == (arbitrary_info_t) subdevp)
			return;	/* our holder won -- value already stored */

		/* lost the race: discard ours, update the winner's below */
		DEL(subdevp);
		if (ainfo == (arbitrary_info_t) NULL) {
#if DEBUG
			printk("pcibr_hints_subdevs: null subdevs ptr at\n"
				"\t%p\n", pconn_vhdl);
#endif
			return;
		}
#if DEBUG
		printk("pcibr_subdevs_get: dup subdev add_LBL at\n"
			"\t%p\n", pconn_vhdl);
#endif
	}
	/* update the (pre-existing or rival) holder in place */
	*(uint64_t *) ainfo = subdevs;
}
/*
 * Attach a zeroed xswitch_vol_s "volunteer" bookkeeping structure to a
 * crosstalk switch vertex under the INFO_LBL_XSWITCH_VOL label.
 *
 * Fix: the result of snia_kmem_zalloc() was passed to mutex_init()
 * without a NULL check; on allocation failure we now warn and return.
 */
void
xswitch_vertex_init(vertex_hdl_t xswitch)
{
	xswitch_vol_t xvolinfo;
	int rc;
	extern void * snia_kmem_zalloc(size_t size, int flag);

	xvolinfo = snia_kmem_zalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
	if (xvolinfo == NULL) {
		printk(KERN_WARNING "xswitch_vertex_init(): Unable to "
			"allocate memory\n");
		return;
	}
	mutex_init(&xvolinfo->xswitch_volunteer_mutex);
	rc = hwgraph_info_add_LBL(xswitch, INFO_LBL_XSWITCH_VOL,
			(arbitrary_info_t)xvolinfo);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = rc;	/* silence "set but unused" when ASSERT compiles away */
}
/*
 * Install pciio_info as the canonical per-connection data for a vertex.
 * The info is stamped with the pciio fingerprint, stored as the vertex's
 * fast info, and also attached under INFO_LBL_PCIIO so pciio_info_chk
 * can recognize PCI slot vertices cheaply.
 */
void
pciio_info_set(vertex_hdl_t pciio, pciio_info_t pciio_info)
{
	if (pciio_info)
		pciio_info->c_fingerprint = pciio_info_fingerprint;

	hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);

	/* Label the vertex as well, so lookups by INFO_LBL_PCIIO find
	 * the same structure the fast-info path does. */
	hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
			     (arbitrary_info_t) pciio_info);
}
/*
 * Install widget_info as the canonical per-widget data for a vertex.
 * The info is stamped with the xwidget fingerprint, stored as the
 * vertex's fast info, and also attached under INFO_LBL_XWIDGET so
 * xwidget_info_chk can recognize widget vertices cheaply.
 */
void
xwidget_info_set(vertex_hdl_t xwidget, xwidget_info_t widget_info)
{
	if (widget_info)
		widget_info->w_fingerprint = widget_info_fingerprint;

	hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) widget_info);

	/* Label the vertex as well, so lookups by INFO_LBL_XWIDGET find
	 * the same structure the fast-info path does. */
	hwgraph_info_add_LBL(xwidget, INFO_LBL_XWIDGET,
			     (arbitrary_info_t) widget_info);
}
/*
 * copy xwidget_info_t from conn_v to peer_conn_v
 *
 * Returns 1 on success, 0 on any failure (missing labels, allocation
 * failure, or failure to attach the duplicate to the peer vertex).
 *
 * Fixes: peer_path[] was declared but never populated, so strlen()/
 * strcpy() read uninitialized stack memory (undefined behavior); it is
 * now zero-initialized, yielding a well-defined empty name.  NEW() and
 * kmalloc() results are now checked, and w_name is freed when the
 * label add fails (it previously leaked).
 */
int
pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
					cnodeid_t xbow_peer)
{
	xwidget_info_t widget_info, peer_widget_info;
	char peer_path[256] = "";	/* was read uninitialized -- see above */
	vertex_hdl_t peer_hubv;
	hubinfo_t peer_hub_info;

	/* get the peer hub's widgetid */
	peer_hubv = NODEPDA(xbow_peer)->node_vertex;
	peer_hub_info = NULL;
	hubinfo_get(peer_hubv, &peer_hub_info);
	if (peer_hub_info == NULL)
		return 0;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
		NEW(peer_widget_info);
		if (!peer_widget_info)
			return 0;	/* allocation failure */

		peer_widget_info->w_vertex = peer_conn_v;
		peer_widget_info->w_id = widget_info->w_id;
		/* master of the duplicate is the *peer* hub */
		peer_widget_info->w_master = peer_hubv;
		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;

		/* structure copy */
		peer_widget_info->w_hwid = widget_info->w_hwid;
		peer_widget_info->w_efunc = 0;
		peer_widget_info->w_einfo = 0;

		/*
		 * NOTE(review): peer_path is never filled in before this
		 * copy; the later revision of this routine takes the path
		 * as a parameter.  With the zero-initialization above the
		 * duplicate simply gets an empty name.
		 */
		peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1,
						   GFP_KERNEL);
		if (!peer_widget_info->w_name) {
			DEL(peer_widget_info);
			return 0;
		}
		strcpy(peer_widget_info->w_name, peer_path);

		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_XWIDGET,
			(arbitrary_info_t)peer_widget_info) != GRAPH_SUCCESS) {
			kfree(peer_widget_info->w_name);
			DEL(peer_widget_info);
			return 0;
		}
		xwidget_info_set(peer_conn_v, peer_widget_info);
		return 1;
	}
	printk("pic_bus1_widget_info_dup: "
			"cannot get INFO_LBL_XWIDGET from 0x%lx\n",
			(uint64_t)conn_v);
	return 0;
}
/*
 * Allocate and zero per-switch "volunteer" bookkeeping, then hang it
 * off the crosstalk switch vertex under INFO_LBL_XSWITCH_VOL.  On
 * allocation failure the vertex is left without volunteer info.
 */
void
xswitch_vertex_init(vertex_hdl_t xswitch)
{
	xswitch_vol_t vol;
	int rc;

	vol = kmalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
	if (vol == NULL) {
		printk(KERN_WARNING "xswitch_vertex_init(): Unable to "
			"allocate memory\n");
		return;
	}
	memset(vol, 0, sizeof(struct xswitch_vol_s));

	init_MUTEX(&vol->xswitch_volunteer_mutex);

	rc = hwgraph_info_add_LBL(xswitch, INFO_LBL_XSWITCH_VOL,
				  (arbitrary_info_t)vol);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = rc;	/* keep rc referenced when ASSERT compiles away */
}
/*
 * Record a hint bitmask describing the subdevices present behind the
 * device in a given PCI slot.  The mask is stored under INFO_LBL_SUBDEVS
 * on the slot's EDGE_LBL_PCI/<slot> vertex; the holder is allocated on
 * first use, with a re-read after the add to cope with a concurrent
 * adder.
 */
void
pcibr_hints_subdevs(devfs_handle_t xconn_vhdl, pciio_slot_t slot,
		    uint64_t subdevs)
{
	arbitrary_info_t ainfo = 0;
	char sdname[16];
	devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;

	sprintf(sdname, "%s/%d", EDGE_LBL_PCI, slot);
	(void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
	if (pconn_vhdl == GRAPH_VERTEX_NONE) {
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, xconn_vhdl,
			    "pcibr_hints_subdevs: hwgraph_path_create failed\n"));
		return;
	}
	hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
	if (ainfo == 0) {
		uint64_t *subdevp;

		NEW(subdevp);
		if (!subdevp) {
			PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, xconn_vhdl,
				    "pcibr_hints_subdevs: subdev ptr alloc failed\n"));
			return;
		}
		*subdevp = subdevs;
		hwgraph_info_add_LBL(pconn_vhdl, INFO_LBL_SUBDEVS,
				     (arbitrary_info_t) subdevp);
		/* re-read: a concurrent caller may have installed its own
		 * holder first, making our add a duplicate */
		hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
		if (ainfo == (arbitrary_info_t) subdevp)
			return;	/* our holder won -- value already stored */

		/* lost the race: discard ours, update the winner's below */
		DEL(subdevp);
		if (ainfo == (arbitrary_info_t) NULL) {
			PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, xconn_vhdl,
				    "pcibr_hints_subdevs: null subdevs ptr\n"));
			return;
		}
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, xconn_vhdl,
			    "pcibr_subdevs_get: dup subdev add_LBL\n"));
	}
	/* update the (pre-existing or rival) holder in place */
	*(uint64_t *) ainfo = subdevs;
}
/*
 * Walk every known module and create its hwgraph presence: a
 * "module/<id>" path carrying the module structure as fast info, plus a
 * "module/<id>/L1" path labelled INFO_LBL_ELSC for the system
 * controller.
 */
void __init
klhwg_add_all_modules(vertex_hdl_t hwgraph_root)
{
	cmoduleid_t mod;
	char path[128];
	vertex_hdl_t l1_vhdl;
	vertex_hdl_t module_vhdl;
	int rc;
	char modname[16];

	/* Add devices under each module */
	for (mod = 0; mod < nummodules; mod++) {
		/* Use module as module vertex fastinfo */
		memset(modname, 0, sizeof(modname));
		format_module_id(modname, sn_modules[mod]->id, MODULE_FORMAT_BRIEF);
		sprintf(path, EDGE_LBL_MODULE "/%s", modname);

		rc = hwgraph_path_add(hwgraph_root, path, &module_vhdl);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = rc;	/* keep rc referenced when ASSERT compiles away */
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, module_vhdl,
			      NULL, "Created module path.\n");

		hwgraph_fastinfo_set(module_vhdl,
				     (arbitrary_info_t) sn_modules[mod]);

		/* Add system controller */
		sprintf(path, EDGE_LBL_MODULE "/%s/" EDGE_LBL_L1, modname);

		rc = hwgraph_path_add(hwgraph_root, path, &l1_vhdl);
		ASSERT_ALWAYS(rc == GRAPH_SUCCESS);
		rc = rc;
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, l1_vhdl,
			      NULL, "Created L1 path.\n");

		hwgraph_info_add_LBL(l1_vhdl, INFO_LBL_ELSC,
				     (arbitrary_info_t)1);
	}
}
/*
 * copy inventory_t from conn_v to peer_conn_v
 *
 * Duplicates the inventory record attached to conn_v and hangs the copy
 * off peer_conn_v under INFO_LBL_INVENT.  Returns 1 on success, 0 when
 * the source has no inventory label, allocation fails, or the label
 * cannot be added to the peer.
 *
 * Fixes: the NEW() result was copied into without a NULL check; bcopy()
 * (legacy, reversed argument order) replaced with standard memcpy();
 * stray trailing space removed from the printk format string.
 */
int
pic_bus1_inventory_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v)
{
	inventory_t *pinv, *peer_pinv;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_INVENT,
			(arbitrary_info_t *)&pinv) == GRAPH_SUCCESS) {
		NEW(peer_pinv);
		if (!peer_pinv)
			return 0;	/* allocation failure */
		memcpy(peer_pinv, pinv, sizeof(inventory_t));
		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_INVENT,
			(arbitrary_info_t)peer_pinv) != GRAPH_SUCCESS) {
			DEL(peer_pinv);
			return 0;
		}
		return 1;
	}
	printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n",
			(uint64_t)conn_v);
	return 0;
}
/*
 * Return the pcibr_hints structure labelled on the crosstalk connection
 * vertex (INFO_LBL_PCIBR_HINTS).  When none exists and 'alloc' is set,
 * allocate one, attach it, and re-read the label to confirm ours is the
 * one installed.  Returns NULL when no hints exist and alloc is 0, or
 * on any allocation/attachment failure.
 *
 * Fixes: 'hint' is initialized to NULL (it was read uninitialized by a
 * conservative reading of the abnormal_exit path), and the NEW() result
 * is checked before its fields are written.
 */
pcibr_hints_t
pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
{
	arbitrary_info_t ainfo = 0;
	graph_error_t rv;
	pcibr_hints_t hint = NULL;

	rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);

	if (alloc && (rv != GRAPH_SUCCESS)) {
		NEW(hint);
		if (!hint)
			return NULL;	/* allocation failure */

		hint->rrb_alloc_funct = NULL;
		hint->ph_intr_bits = NULL;
		rv = hwgraph_info_add_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS,
					  (arbitrary_info_t) hint);
		if (rv != GRAPH_SUCCESS)
			goto abnormal_exit;

		/* confirm the label now attached is the one we added */
		rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS,
					  &ainfo);
		if (rv != GRAPH_SUCCESS)
			goto abnormal_exit;

		if (ainfo != (arbitrary_info_t) hint)
			goto abnormal_exit;
	}
	return (pcibr_hints_t) ainfo;

abnormal_exit:
#ifdef LATER
	printf("SHOULD NOT BE HERE\n");
#endif
	DEL(hint);
	return(NULL);
}
/* Add "detailed" labelled inventory information to the
 * prom vertex
 *
 * Allocates an invent_miscinfo_t for the node's PROM, fills in the PROM
 * revision/version from IP27CONFIG, attaches it to the prom device
 * vertex under INFO_LBL_DETAIL_INVENT, and exports the label so user
 * programs can read it via attr_get().
 */
void
cpuprom_detailed_inventory_info_add(devfs_handle_t prom_dev, devfs_handle_t node)
{
	invent_miscinfo_t *cpuprom_inventory_info;
	extern invent_generic_t *klhwg_invent_alloc(cnodeid_t cnode,
						    int class, int size);
	cnodeid_t cnode = hubdev_cnodeid_get(node);

	/* Allocate memory for the extra inventory information
	 * for the prom.  NOTE(review): allocation failure is only caught
	 * by ASSERT; with asserts compiled out the stores below would
	 * dereference NULL -- confirm klhwg_invent_alloc's contract. */
	cpuprom_inventory_info = (invent_miscinfo_t *)
		klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));

	ASSERT(cpuprom_inventory_info);
	/* Set the enabled flag so that the hinv interprets this
	 * information */
	cpuprom_inventory_info->im_gen.ig_flag = INVENT_ENABLED;
	cpuprom_inventory_info->im_type = SN_PROMVERSION;
	/* Store prom revision into inventory information */
	cpuprom_inventory_info->im_rev = IP27CONFIG.pvers_rev;
	cpuprom_inventory_info->im_version = IP27CONFIG.pvers_vers;

	/* Store this info as labelled information hanging off the
	 * prom device vertex */
	hwgraph_info_add_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
			     (arbitrary_info_t) cpuprom_inventory_info);
	/* Export this information so that user programs can get to
	 * this by using attr_get() */
	hwgraph_info_export_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
				sizeof(invent_miscinfo_t));
}
/*
 * Associate a set of pciio_provider functions with a vertex.
 *
 * The ops table is attached under the INFO_LBL_PFUNCS label; the return
 * value of hwgraph_info_add_LBL is deliberately ignored here.
 */
void
pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
{
	hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS,
			     (arbitrary_info_t) pciio_fns);
}
/*
 * Return the pciio_info hanging off a vertex as fast info, after
 * checking its fingerprint.  A non-NULL info with the wrong fingerprint
 * yields (pciio_info_t)-1 to flag corruption.
 */
pciio_info_t
pciio_info_get(devfs_handle_t pciio)
{
	pciio_info_t pciio_info;

	pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);

#ifdef DEBUG_PCIIO
	{
		int pos;
		char dname[256];

		pos = devfs_generate_path(pciio, dname, 256);
		printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
	}
#endif /* DEBUG_PCIIO */

#ifdef BRINGUP
	/* during bringup, tolerate a NULL fingerprint as "not yet set" */
	if ((pciio_info != NULL) &&
	    (pciio_info->c_fingerprint != pciio_info_fingerprint) &&
	    (pciio_info->c_fingerprint != NULL)) {
#else
	if ((pciio_info != NULL) &&
	    (pciio_info->c_fingerprint != pciio_info_fingerprint)) {
#endif /* BRINGUP */
		return((pciio_info_t)-1); /* Should panic .. */
	}

	return pciio_info;
}

/*
 * Install pciio_info as a vertex's fast info, stamping it with the
 * pciio fingerprint and also labelling the vertex INFO_LBL_PCIIO so
 * pciio_info_chk can work (and be fairly efficient).
 */
void
pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
{
	if (pciio_info != NULL)
		pciio_info->c_fingerprint = pciio_info_fingerprint;

	hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);

	/* Also, mark this vertex as a PCI slot
	 * and use the pciio_info, so pciio_info_chk
	 * can work (and be fairly efficient).
	 */
	hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
			     (arbitrary_info_t) pciio_info);
}

/* Simple field accessors for pciio_info_t follow. */

devfs_handle_t
pciio_info_dev_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vertex);
}

/*ARGSUSED*/
pciio_bus_t
pciio_info_bus_get(pciio_info_t pciio_info)
{
	/* XXX for now O2 always gets back bus 0 */
	return (pciio_bus_t)0;
}

pciio_slot_t
pciio_info_slot_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_slot);
}

pciio_function_t
pciio_info_function_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_func);
}

pciio_vendor_id_t
pciio_info_vendor_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vendor);
}

pciio_device_id_t
pciio_info_device_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_device);
}

devfs_handle_t
pciio_info_master_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_master);
}

arbitrary_info_t
pciio_info_mfast_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_mfast);
}

pciio_provider_t *
pciio_info_pops_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_pops);
}

error_handler_f *
pciio_info_efunc_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_efunc);
}

error_handler_arg_t *
pciio_info_einfo_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_einfo);
}

/* Per-BAR (base address register window) accessors. */

pciio_space_t
pciio_info_bar_space_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_space;
}

iopaddr_t
pciio_info_bar_base_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_base;
}

size_t
pciio_info_bar_size_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_size;
}

iopaddr_t
pciio_info_rom_base_get(pciio_info_t info)
{
	return info->c_rbase;
}

size_t
pciio_info_rom_size_get(pciio_info_t info)
{
	return info->c_rsize;
}

/* =====================================================================
 *            GENERIC PCI INITIALIZATION FUNCTIONS
 */

/*
 *    pciioinit: called once during device driver
 *      initializtion if this driver is configured into
 *      the system.
 */
void
pciio_init(void)
{
	cdl_p cp;

#if DEBUG && ATTACH_DEBUG
	printf("pciio_init\n");
#endif
	/* Allocate the registry.
	 * We might already have one.
	 * If we don't, go get one.
	 * MPness: someone might have
	 * set one up for us while we
	 * were not looking; use an atomic
	 * compare-and-swap to commit to
	 * using the new registry if and
	 * only if nobody else did first.
	 * If someone did get there first,
	 * toss the one we allocated back
	 * into the pool.
	 */
	if (pciio_registry == NULL) {
		cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
		if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
			cdl_del(cp);
		}
	}
	ASSERT(pciio_registry != NULL);
}
/*
 * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
 * to attach each of those buses.
 *
 * pic_attach2 allocates and initializes the per-bus soft state
 * (pcibr_soft), links it into the global pcibr_list, programs the PIC
 * hardware (control register, ATEs, error interrupt, bringup WARs),
 * sets up resource maps, then probes and attaches every slot.
 * Returns 0 on success, -1 when the hints say "hands off", or
 * -ENOMEM on allocation failure.
 */
static int
pic_attach2(vertex_hdl_t xconn_vhdl, void *bridge,
	    vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
	vertex_hdl_t ctlr_vhdl;
	pcibr_soft_t pcibr_soft;
	pcibr_info_t pcibr_info;
	xwidget_info_t info;
	xtalk_intr_t xtalk_intr;
	pcibr_list_p self;
	int entry, slot, ibit, i;
	vertex_hdl_t noslot_conn;
	char devnm[MAXDEVNAME], *s;
	pcibr_hints_t pcibr_hints;
	picreg_t id;
	picreg_t int_enable;
	picreg_t pic_ctrl_reg;
	/* local prototypes for the L1/module query helpers used below */
	int iobrick_type_get_nasid(nasid_t nasid);
	int iomoduleid_get(nasid_t nasid);
	int irq;
	int cpu;

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
		    "pic_attach2: bridge=0x%lx, busnum=%d\n", bridge, busnum));

	/* Register the per-bus "controller" character device vertex. */
	ctlr_vhdl = NULL;
	ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0, 0,
		    0, 0, S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
		    (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
	ASSERT(ctlr_vhdl != NULL);

	/* Record the ASIC revision as a label on the bridge vertex. */
	id = pcireg_bridge_id_get(bridge);
	hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
			     (arbitrary_info_t)XWIDGET_PART_REV_NUM(id));

	/*
	 * Get the hint structure; if some NIC callback marked this vertex as
	 * "hands-off" then we just return here, before doing anything else.
	 */
	pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
	if (pcibr_hints && pcibr_hints->ph_hands_off)
		return -1;

	/* allocate soft structure to hang off the vertex.  Link the new soft
	 * structure to the pcibr_list linked list */
	pcibr_soft = kmalloc(sizeof (*(pcibr_soft)), GFP_KERNEL);
	if ( !pcibr_soft )
		return -ENOMEM;

	self = kmalloc(sizeof (*(self)), GFP_KERNEL);
	if ( !self ) {
		kfree(pcibr_soft);
		return -ENOMEM;
	}
	memset(pcibr_soft, 0, sizeof (*(pcibr_soft)));
	memset(self, 0, sizeof (*(self)));

	self->bl_soft = pcibr_soft;
	self->bl_vhdl = pcibr_vhdl;
	self->bl_next = pcibr_list;
	pcibr_list = self;

	if (ret_softp)
		*ret_softp = pcibr_soft;

	/* NOTE(review): pcibr_soft was already zeroed above; this second
	 * memset looks redundant but is harmless. */
	memset(pcibr_soft, 0, sizeof *pcibr_soft);
	pcibr_soft_set(pcibr_vhdl, pcibr_soft);

	/* Copy the canonical device name into the soft state. */
	s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
	pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
	/* NOTE(review): on this failure path pcibr_soft/self remain linked
	 * on pcibr_list with a NULL bs_name -- confirm teardown handles it
	 * before changing the unwinding here. */
	if (!pcibr_soft->bs_name)
		return -ENOMEM;
	strcpy(pcibr_soft->bs_name, s);

	pcibr_soft->bs_conn = xconn_vhdl;
	pcibr_soft->bs_vhdl = pcibr_vhdl;
	pcibr_soft->bs_base = (void *)bridge;
	pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
	pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
	pcibr_soft->bsi_err_intr = 0;
	pcibr_soft->bs_min_slot = 0;
	pcibr_soft->bs_max_slot = 3;
	pcibr_soft->bs_busnum = busnum;
	pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
	pcibr_soft->bs_int_ate_size = PIC_INTERNAL_ATES;
	/* Make sure this is called after setting the bs_base and bs_bridge_type */
	pcibr_soft->bs_bridge_mode = (pcireg_speed_get(pcibr_soft) << 1) |
					pcireg_mode_get(pcibr_soft);

	info = xwidget_info_get(xconn_vhdl);
	pcibr_soft->bs_xid = xwidget_info_id_get(info);
	pcibr_soft->bs_master = xwidget_info_master_get(info);
	pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);

	strcpy(pcibr_soft->bs_asic_name, "PIC");

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
		    "pic_attach2: pcibr_soft=0x%lx, mode=0x%x\n",
		    pcibr_soft, pcibr_soft->bs_bridge_mode));
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
		    "pic_attach2: %s ASIC: rev %s (code=0x%x)\n",
		    pcibr_soft->bs_asic_name,
		    (IS_PIC_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A" :
		    (IS_PIC_PART_REV_B(pcibr_soft->bs_rev_num)) ? "B" :
		    (IS_PIC_PART_REV_C(pcibr_soft->bs_rev_num)) ? "C" :
		    "unknown", pcibr_soft->bs_rev_num));

	/* PV854845: Must clear write request buffer to avoid parity errors */
	for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
		((pic_t *)bridge)->p_wr_req_lower[i] = 0;
		((pic_t *)bridge)->p_wr_req_upper[i] = 0;
		((pic_t *)bridge)->p_wr_req_parity[i] = 0;
	}

	pcibr_soft->bs_nasid = NASID_GET(bridge);

	/* Ask the L1 system controller what kind of brick this is. */
	pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
	if (pcibr_soft->bs_bricktype < 0)
		printk(KERN_WARNING "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
			pcibr_soft->bs_name, pcibr_soft->bs_bricktype);

	pcibr_soft->bs_moduleid = iomoduleid_get(pcibr_soft->bs_nasid);

	/* Slot population limits depend on the brick flavor. */
	if (pcibr_soft->bs_bricktype > 0) {
		switch (pcibr_soft->bs_bricktype) {
		case MODULE_PXBRICK:
		case MODULE_IXBRICK:
		case MODULE_OPUSBRICK:
			pcibr_soft->bs_first_slot = 0;
			pcibr_soft->bs_last_slot = 1;
			pcibr_soft->bs_last_reset = 1;

			/* Bus 1 of IXBrick has a IO9, so there are 4 devices, not 2 */
			if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
			    && isIO9(pcibr_soft->bs_nasid)) {
				pcibr_soft->bs_last_slot = 3;
				pcibr_soft->bs_last_reset = 3;
			}
			break;
		case MODULE_CGBRICK:
			pcibr_soft->bs_first_slot = 0;
			pcibr_soft->bs_last_slot = 0;
			pcibr_soft->bs_last_reset = 0;
			break;
		default:
			printk(KERN_WARNING "%s: Unknown bricktype: 0x%x\n",
				pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
			break;
		}

		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
			    "pic_attach2: bricktype=%d, brickbus=%d, "
			    "slots %d-%d\n", pcibr_soft->bs_bricktype,
			    pcibr_widget_to_bus(pcibr_vhdl),
			    pcibr_soft->bs_first_slot,
			    pcibr_soft->bs_last_slot));
	}

	/*
	 * Initialize bridge and bus locks
	 */
	spin_lock_init(&pcibr_soft->bs_lock);

	/*
	 * If we have one, process the hints structure.
	 */
	if (pcibr_hints) {
		unsigned rrb_fixed;

		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
			    "pic_attach2: pcibr_hints=0x%lx\n", pcibr_hints));

		rrb_fixed = pcibr_hints->ph_rrb_fixed;
		pcibr_soft->bs_rrb_fixed = rrb_fixed;

		if (pcibr_hints->ph_intr_bits)
			pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;

		/* hints encode host slots as slot+1; 0 means "no host" */
		for (slot = pcibr_soft->bs_min_slot;
		     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
			int hslot = pcibr_hints->ph_host_slot[slot] - 1;

			if (hslot < 0) {
				pcibr_soft->bs_slot[slot].host_slot = slot;
			} else {
				pcibr_soft->bs_slot[slot].has_host = 1;
				pcibr_soft->bs_slot[slot].host_slot = hslot;
			}
		}
	}

	/*
	 * Set-up initial values for state fields
	 */
	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
		pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
		pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
		pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
		pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
	}

	/* One interrupt wrapper per PIC interrupt bit. */
	for (ibit = 0; ibit < 8; ++ibit) {
		pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
		pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
	}

	/*
	 * connect up our error handler.  PIC has 2 busses (thus resulting in 2
	 * pcibr_soft structs under 1 widget), so only register a xwidget error
	 * handler for PIC's bus0.  NOTE: for PIC pcibr_error_handler_wrapper()
	 * is a wrapper routine we register that will call the real error handler
	 * pcibr_error_handler() with the correct pcibr_soft struct.
	 */
	if (busnum == 0) {
		xwidget_error_register(xconn_vhdl,
				pcibr_error_handler_wrapper, pcibr_soft);
	}

	/*
	 * Clear all pending interrupts.  Assume all interrupts are from slot 3
	 * until otherise setup.
	 */
	pcireg_intr_reset_set(pcibr_soft, PIC_IRR_ALL_CLR);
	pcireg_intr_device_set(pcibr_soft, 0x006db6db);

	/* Setup the mapping register used for direct mapping */
	pcibr_directmap_init(pcibr_soft);

	/*
	 * Initialize the PICs control register.
	 */
	pic_ctrl_reg = pcireg_control_get(pcibr_soft);

	/* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
	pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
	pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
	pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
	pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;

	pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
	pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;

	/* enable parity checking on PICs internal RAM */
	pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
	pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;

	/* PIC BRINGUP WAR (PV# 862253): dont enable write request parity */
	if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
		pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
	}

	pic_ctrl_reg |= PIC_CTRL_PAGE_SIZE;

	pcireg_control_set(pcibr_soft, pic_ctrl_reg);

	/* Initialize internal mapping entries (ie. the ATEs) */
	for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
		pcireg_int_ate_set(pcibr_soft, entry, 0);

	pcibr_soft->bs_int_ate_resource.start = 0;
	pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;

	/* Setup the PICs error interrupt handler. */
	xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);

	ASSERT(xtalk_intr != NULL);

	/* steer the allocated interrupt onto the dedicated error vector */
	irq = ((hub_intr_t)xtalk_intr)->i_bit;
	cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;

	intr_unreserve_level(cpu, irq);
	((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
	xtalk_intr->xi_vector = SGI_PCIBR_ERROR;

	pcibr_soft->bsi_err_intr = xtalk_intr;

	/*
	 * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
	 * in order to work around some addressing limitations.  In order
	 * for that fire wall to work properly, we need to make sure we
	 * start from a known clean state.
	 */
	pcibr_clearwidint(pcibr_soft);

	xtalk_intr_connect(xtalk_intr,
			(intr_func_t) pcibr_error_intr_handler,
			(intr_arg_t) pcibr_soft,
			(xtalk_intr_setfunc_t) pcibr_setwidint,
			(void *) pcibr_soft);

	request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
			SA_SHIRQ, "PCIBR error", (intr_arg_t) pcibr_soft);

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
		    "pcibr_setwidint: target_id=0x%lx, int_addr=0x%lx\n",
		    pcireg_intr_dst_target_id_get(pcibr_soft),
		    pcireg_intr_dst_addr_get(pcibr_soft)));

	/* now we can start handling error interrupts */
	int_enable = pcireg_intr_enable_get(pcibr_soft);
	int_enable |= PIC_ISR_ERRORS;

	/* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
	 * locked out to be freed up sooner (by timing out) so that the
	 * read tnums are never completely used up.
	 */
	if (PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
		int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
		int_enable &= ~PIC_ISR_XREAD_REQ_TIMEOUT;

		pcireg_req_timeout_set(pcibr_soft, 0x750);
	}

	pcireg_intr_enable_set(pcibr_soft, int_enable);
	pcireg_intr_mode_set(pcibr_soft, 0);	/* dont send 'clear interrupt' pkts */
	pcireg_tflush_get(pcibr_soft);	/* wait until Bridge PIO complete */

	/*
	 * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
	 * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
	 * so they are not used.  This works since there is currently no
	 * API to penable VCHAN3.
	 */
	if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
		pcireg_rrb_bit_set(pcibr_soft, 0, 0x000f000f);	/* even rrb reg */
		pcireg_rrb_bit_set(pcibr_soft, 1, 0x000f000f);	/* odd rrb reg */
	}

	/* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
	 * all PCI-X devices that initiate memory transactions must be
	 * capable of generating 64-bit addressed, we force 64-bit DMAs.
	 */
	pcibr_soft->bs_dma_flags = 0;
	if (IS_PCIX(pcibr_soft)) {
		pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
	}

	{
		iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
		int prom_base_size = 0x1000000;
		int status;
		struct resource *res;

		/* Allocate resource maps based on bus page size; for I/O and memory
		 * space, free all pages except those in the base area and in the
		 * range set by the PROM.
		 *
		 * PROM creates BAR addresses in this format: 0x0ws00000 where w is
		 * the widget number and s is the device register offset for the slot.
		 */

		/* Setup the Bus's PCI IO Root Resource. */
		pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
		pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
		res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
		if (!res)
			panic("PCIBR:Unable to allocate resource structure\n");

		/* Block off the range used by PROM. */
		res->start = prom_base_addr;
		res->end = prom_base_addr + (prom_base_size - 1);
		status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
		if (status)
			panic("PCIBR:Unable to request_resource()\n");

		/* Setup the Small Window Root Resource */
		pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
		pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;

		/* Setup the Bus's PCI Memory Root Resource */
		pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
		pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
		res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
		if (!res)
			panic("PCIBR:Unable to allocate resource structure\n");

		/* Block off the range used by PROM. */
		res->start = prom_base_addr;
		res->end = prom_base_addr + (prom_base_size - 1);
		status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
		if (status)
			panic("PCIBR:Unable to request_resource()\n");
	}

	/* build "no-slot" connection point */
	pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
			    PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE,
			    PCIIO_DEVICE_ID_NONE);
	noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);

	/* Store no slot connection point info for tearing it down during detach. */
	pcibr_soft->bs_noslot_conn = noslot_conn;
	pcibr_soft->bs_noslot_info = pcibr_info;

	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		/* Find out what is out there */
		(void)pcibr_slot_info_init(pcibr_vhdl, slot);
	}

	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		/* Set up the address space for this slot in the PCI land */
		(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
	}

	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		/* Setup the device register */
		(void)pcibr_slot_device_init(pcibr_vhdl, slot);
	}

	if (IS_PCIX(pcibr_soft)) {
		pcibr_soft->bs_pcix_rbar_inuse = 0;
		pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
		pcibr_soft->bs_pcix_rbar_percent_allowed =
					pcibr_pcix_rbars_calc(pcibr_soft);

		for (slot = pcibr_soft->bs_min_slot;
		     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
			/* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
			(void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
		}
	}

	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		/* Setup host/guest relations */
		(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
	}

	/* Handle initial RRB management */
	pcibr_initial_rrb(pcibr_vhdl, pcibr_soft->bs_first_slot,
			  pcibr_soft->bs_last_slot);

	/* Before any drivers get called that may want to re-allocate RRB's,
	 * let's get some special cases pre-allocated.  Drivers may override
	 * these pre-allocations, but by doing pre-allocations now we're
	 * assured not to step all over what the driver intended.
	 */
	if (pcibr_soft->bs_bricktype > 0) {
		switch (pcibr_soft->bs_bricktype) {
		case MODULE_PXBRICK:
		case MODULE_IXBRICK:
		case MODULE_OPUSBRICK:
			/*
			 * If IO9 in bus 1, allocate RRBs to all the IO9 devices
			 */
			if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
			    && (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9)
			    && (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
				pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
				pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
				pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
				pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
			} else {
				pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
				pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
			}
			break;
		case MODULE_CGBRICK:
			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 8);
			break;
		} /* switch */
	}

	for (slot = pcibr_soft->bs_min_slot;
	     slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		/* Call the device attach */
		(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
	}

	pciio_device_attach(noslot_conn, 0);

	return 0;
}
/*
 * Associate a set of pciio_provider functions with a vertex.
 *
 * The ops table is attached under the INFO_LBL_PFUNCS label; the return
 * value of hwgraph_info_add_LBL is deliberately ignored here.
 */
void
pciio_provider_register(vertex_hdl_t provider, pciio_provider_t *pciio_fns)
{
	hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS,
			     (arbitrary_info_t) pciio_fns);
}