/*
 * pciio_info_get: fetch the pciio_info_t stored in the fastinfo slot of
 * a PCI connection-point vertex.  Returns (pciio_info_t)-1 when the
 * stored info carries a non-NULL fingerprint that does not match
 * pciio_info_fingerprint (corrupt or foreign info).
 */
pciio_info_t
pciio_info_get(vertex_hdl_t pciio)
{
	pciio_info_t		info;

	info = (pciio_info_t) hwgraph_fastinfo_get(pciio);

#ifdef DEBUG_PCIIO
	{
		char	path_buf[256];
		int	start;

		start = devfs_generate_path(pciio, path_buf, 256);
		printk("%s : path= %s\n", __FUNCTION__, &path_buf[start]);
	}
#endif /* DEBUG_PCIIO */

	/* A set-but-wrong fingerprint means this is not really our info. */
	if (info != NULL &&
	    info->c_fingerprint != pciio_info_fingerprint &&
	    info->c_fingerprint != NULL)
		return (pciio_info_t)-1;	/* Should panic .. */

	return info;
}
/*
 * pciio_device_info_new: populate (allocating if necessary) a pciio_info
 * descriptor for a PCI device at the given slot/function under "master".
 * Returns the filled-in descriptor; the caller owns it.
 */
pciio_info_t
pciio_device_info_new(
	pciio_info_t pciio_info,
	devfs_handle_t master,
	pciio_slot_t slot,
	pciio_function_t func,
	pciio_vendor_id_t vendor_id,
	pciio_device_id_t device_id)
{
	/* Caller may pass NULL to have the descriptor allocated here. */
	if (!pciio_info)
		NEW(pciio_info);
	ASSERT(pciio_info != NULL);

	/* Provider linkage: master vertex plus its cached fastinfo and ops. */
	pciio_info->c_master = master;
	pciio_info->c_mfast = hwgraph_fastinfo_get(master);
	pciio_info->c_pops = pciio_provider_fns_get(master);

	/* Device identity. */
	pciio_info->c_slot = slot;
	pciio_info->c_func = func;
	pciio_info->c_vendor = vendor_id;
	pciio_info->c_device = device_id;

	/* No error handler registered yet. */
	pciio_info->c_efunc = 0;
	pciio_info->c_einfo = 0;

	return pciio_info;
}
/*
 * xwidget_info_get: the widget info for an xwidget vertex lives in the
 * vertex's fastinfo slot; fetch and cast it.
 */
xwidget_info_t
xwidget_info_get(vertex_hdl_t xwidget)
{
	return (xwidget_info_t) hwgraph_fastinfo_get(xwidget);
}
/*
 * pciio_info_get: fetch the pciio_info_t stored in the fastinfo slot of
 * a PCI connection-point vertex.  Returns (pciio_info_t)-1 (caller is
 * expected to treat this as fatal) when the fingerprint check fails.
 */
pciio_info_t
pciio_info_get(devfs_handle_t pciio)
{
	pciio_info_t            pciio_info;

	pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);

#ifdef DEBUG_PCIIO
	{
		int     pos;
		char    dname[256];

		pos = devfs_generate_path(pciio, dname, 256);
		printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
	}
#endif /* DEBUG_PCIIO */

#ifdef BRINGUP
	/* During bringup a NULL fingerprint (info not yet stamped by
	 * pciio_info_set) is tolerated; only a wrong non-NULL one fails.
	 * NOTE(review): confirm the relaxed check is still required.
	 */
	if ((pciio_info != NULL) &&
	    (pciio_info->c_fingerprint != pciio_info_fingerprint) &&
	    (pciio_info->c_fingerprint != NULL)) {
#else
	if ((pciio_info != NULL) &&
	    (pciio_info->c_fingerprint != pciio_info_fingerprint)) {
#endif /* BRINGUP */
		return((pciio_info_t)-1); /* Should panic .. */
	}

	return pciio_info;
}

/*
 * pciio_info_set: stamp the info with our fingerprint and hang it off
 * the vertex, both as fastinfo and under the INFO_LBL_PCIIO label.
 */
void
pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
{
	if (pciio_info != NULL)
		pciio_info->c_fingerprint = pciio_info_fingerprint;

	hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);

	/* Also, mark this vertex as a PCI slot
	 * and use the pciio_info, so pciio_info_chk
	 * can work (and be fairly efficient).
	 */
	hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO, (arbitrary_info_t) pciio_info);
}

/* Simple field accessors for pciio_info_t follow. */

/* Connection-point vertex this info describes. */
devfs_handle_t
pciio_info_dev_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vertex);
}

/*ARGSUSED*/
pciio_bus_t
pciio_info_bus_get(pciio_info_t pciio_info)
{
	/* XXX for now O2 always gets back bus 0 */
	return (pciio_bus_t)0;
}

pciio_slot_t
pciio_info_slot_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_slot);
}

pciio_function_t
pciio_info_function_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_func);
}

pciio_vendor_id_t
pciio_info_vendor_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vendor);
}

pciio_device_id_t
pciio_info_device_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_device);
}

/* Provider (bus controller) vertex for this device. */
devfs_handle_t
pciio_info_master_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_master);
}

/* Cached fastinfo of the master vertex. */
arbitrary_info_t
pciio_info_mfast_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_mfast);
}

/* Provider operations vector of the master vertex. */
pciio_provider_t *
pciio_info_pops_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_pops);
}

/* Registered error handler function (0 if none). */
error_handler_f *
pciio_info_efunc_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_efunc);
}

/* Argument passed to the registered error handler (0 if none). */
error_handler_arg_t *
pciio_info_einfo_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_einfo);
}

/* Per-BAR window attributes: decode space, base address, size. */
pciio_space_t
pciio_info_bar_space_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_space;
}

iopaddr_t
pciio_info_bar_base_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_base;
}

size_t
pciio_info_bar_size_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_size;
}

/* Expansion ROM base address and size. */
iopaddr_t
pciio_info_rom_base_get(pciio_info_t info)
{
	return info->c_rbase;
}

size_t
pciio_info_rom_size_get(pciio_info_t info)
{
	return info->c_rsize;
}

/* =====================================================================
 *          GENERIC PCI INITIALIZATION FUNCTIONS
 */

/*
 * pciioinit: called once during device driver
 * initialization if this driver is configured into
 * the system.
 */
void
pciio_init(void)
{
	cdl_p                   cp;

#if DEBUG && ATTACH_DEBUG
	printf("pciio_init\n");
#endif

	/* Allocate the registry.
	 * We might already have one.
	 * If we don't, go get one.
	 * MPness: someone might have
	 * set one up for us while we
	 * were not looking; use an atomic
	 * compare-and-swap to commit to
	 * using the new registry if and
	 * only if nobody else did first.
	 * If someone did get there first,
	 * toss the one we allocated back
	 * into the pool.
	 */
	if (pciio_registry == NULL) {
		cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
		if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
			cdl_del(cp);
		}
	}
	ASSERT(pciio_registry != NULL);
}
/*
 * xtalk_provider_fns_get: return the xtalk_provider_t operations vector
 * for the given Crosstalk provider vertex.  The vector is stored in the
 * vertex's fastinfo slot.
 */
xtalk_provider_t *
xtalk_provider_fns_get(vertex_hdl_t provider)
{
	xtalk_provider_t	*fns;

	fns = (xtalk_provider_t *) hwgraph_fastinfo_get(provider);
	return fns;
}
/*
 * synergy_perf_ioctl: ioctl handler for the per-node synergy performance
 * monitoring device.  The node (cnode) and FSB number are recovered from
 * the fastinfo of the devfs vertex backing the inode.
 *
 * Returns 0 on success or a negative errno.
 */
static int
synergy_perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	int             cnode;
	nodepda_t       *npdap;
	synergy_perf_t  *p;
	int             intarg;
	int             fsb;
	uint64_t        longarg;
	uint64_t        *stats;
	int             n;
	devfs_handle_t  d;
	arbitrary_info_t info;

	if ((d = devfs_get_handle_from_inode(inode)) == NULL)
		return -ENODEV;
	/* cnode and fsb were packed into the vertex fastinfo at setup time. */
	info = hwgraph_fastinfo_get(d);
	cnode = SYNERGY_PERF_INFO_CNODE(info);
	fsb = SYNERGY_PERF_INFO_FSB(info);
	npdap = NODEPDA(cnode);

	switch (cmd) {
	case SNDRV_GET_SYNERGY_VERSION:
		/* return int, version of data structure for SNDRV_GET_SYNERGYINFO */
		intarg = 1; /* version 1 */
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
			return -EFAULT;
		break;

	case SNDRV_GET_INFOSIZE:
		/* return int, sizeof buf needed for SYNERGY_PERF_GET_STATS */
		intarg = synergy_perf_size(npdap);
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
			return -EFAULT;
		break;

	case SNDRV_GET_SYNERGYINFO:
		/* return array of event/value pairs, this node only */
		if ((intarg = synergy_perf_size(npdap)) <= 0)
			return -ENODATA;
		if ((stats = (uint64_t *)kmalloc(intarg, GFP_KERNEL)) == NULL)
			return -ENOMEM;
		/* Snapshot under the lock; the event list is circular, so
		 * stop when we wrap back to the first element. */
		spin_lock_irq(&npdap->synergy_perf_lock);
		for (n=0, p = npdap->synergy_perf_first; p;) {
			stats[n++] = p->modesel;
			if (p->intervals > 0)
				/* Scale the raw count to the full sampling period. */
				stats[n++] = p->counts[fsb] * p->total_intervals / p->intervals;
			else
				stats[n++] = 0;
			p = p->next;
			if (p == npdap->synergy_perf_first)
				break;
		}
		spin_unlock_irq(&npdap->synergy_perf_lock);
		if (copy_to_user((void *)arg, stats, intarg)) {
			kfree(stats);
			return -EFAULT;
		}
		kfree(stats);
		break;

	case SNDRV_SYNERGY_APPEND:
		/* reads 64bit event, append synergy perf event to all nodes */
		if (copy_from_user(&longarg, (void *)arg, sizeof(longarg)))
			return -EFAULT;
		return synergy_perf_append(longarg);
		break; /* NOTE(review): unreachable after the return above */

	case SNDRV_GET_SYNERGY_STATUS:
		/* return int, 1 if enabled else 0 */
		intarg = npdap->synergy_perf_enabled;
		if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
			return -EFAULT;
		break;

	case SNDRV_SYNERGY_ENABLE:
		/* read int, if true enable counting else disable */
		if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
			return -EFAULT;
		synergy_perf_set_enable(intarg);
		break;

	case SNDRV_SYNERGY_FREQ:
		/* read int, set jiffies per update; must be in [0, HZ) */
		if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
			return -EFAULT;
		if (intarg < 0 || intarg >= HZ)
			return -EINVAL;
		synergy_perf_set_freq(intarg);
		break;

	default:
		/* NOTE(review): printk lacks a KERN_* level prefix */
		printk("Warning: invalid ioctl %d on synergy mon for cnode=%d fsb=%d\n", cmd, cnode, fsb);
		return -EINVAL;
	}
	return(0);
}
/*
 * sn_pci_fixup() - This routine is called when platform_pci_fixup() is
 *	invoked at the end of pcibios_init() to link the Linux pci
 *	infrastructure to SGI IO Infrastructure - ia64/kernel/pci.c
 *
 *	Called twice: with arg == 0 before probing (interrupt/BIOS setup),
 *	then with arg != 0 after probing to wire each pci_bus/pci_dev to
 *	its hwgraph vertex, fix up resource addresses, and set up
 *	interrupts.
 *
 *	Other platform specific fixup can also be done here.
 */
void
sn_pci_fixup(int arg)
{
	struct list_head *ln;
	struct pci_bus *pci_bus = NULL;
	struct pci_dev *device_dev = NULL;
	struct sn_widget_sysdata *widget_sysdata;
	struct sn_device_sysdata *device_sysdata;
#ifdef SN_IOPORTS
	unsigned long ioport;
#endif
	pciio_intr_t intr_handle;
	int cpuid, bit;
	devfs_handle_t device_vertex;
	pciio_intr_line_t lines;
	extern void sn_pci_find_bios(void);
#ifdef CONFIG_IA64_SGI_SN2
	extern int numnodes;
	int cnode;
#endif /* CONFIG_IA64_SGI_SN2 */

	/* Pre-probe pass: set up irq descriptors, find the PCI BIOS, and
	 * (SN2) initialize the per-node interrupt vector blocks. */
	if (arg == 0) {
		sn_init_irq_desc();
		sn_pci_find_bios();
#ifdef CONFIG_IA64_SGI_SN2
		for (cnode = 0; cnode < numnodes; cnode++) {
			extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
			intr_init_vecblk(NODEPDA(cnode), cnode, 0);
		}
#endif /* CONFIG_IA64_SGI_SN2 */
		return;
	}

#if 0
	/* Disabled debug dump of bridge device registers. */
	{
		devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
		pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
		bridge_t *bridge = pcibr_soft->bs_base;
		printk("pci_fixup_ioc3: Before devreg fixup\n");
		printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
		printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
		printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
		printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
		printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
		printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
		printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
		printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
	}
#endif

	done_probing = 1;

	/*
	 * Initialize the pci bus vertex in the pci_bus struct.
	 */
	for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		pci_bus = pci_bus_b(ln);
		/* NOTE(review): kmalloc return is not checked for NULL */
		widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata), GFP_KERNEL);
		widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
		pci_bus->sysdata = (void *)widget_sysdata;
	}

	/*
	 * set the root start and end so that drivers calling check_region()
	 * won't see a conflict
	 */
#ifdef SN_IOPORTS
	ioport_resource.start = sn_ioport_num;
	ioport_resource.end = 0xffff;
#else
#if defined(CONFIG_IA64_SGI_SN1)
	if ( IS_RUNNING_ON_SIMULATOR() ) {
		/*
		 * IDE legacy IO PORTs are supported in Medusa.
		 * Just open up IO PORTs from 0 .. ioport_resource.end.
		 */
		ioport_resource.start = 0;
	} else {
		/*
		 * We do not support Legacy IO PORT numbers.
		 */
		ioport_resource.start |= IO_SWIZ_BASE | __IA64_UNCACHED_OFFSET;
	}
	ioport_resource.end |= (HSPEC_SWIZ_BASE-1) | __IA64_UNCACHED_OFFSET;
#else
	// Need something here for sn2.... ZXZXZX
#endif
#endif

	/*
	 * Set the root start and end for Mem Resource.
	 */
	iomem_resource.start = 0;
	iomem_resource.end = 0xffffffffffffffff;

	/*
	 * Initialize the device vertex in the pci_dev struct.
	 */
	pci_for_each_dev(device_dev) {
		unsigned int irq;
		int idx;
		u16 cmd;
		devfs_handle_t vhdl;
		unsigned long size;
		extern int bit_pos_to_irq(int);

		/* IOC3 needs special register fixups before anything else. */
		if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
		    device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
			extern void pci_fixup_ioc3(struct pci_dev *d);
			pci_fixup_ioc3(device_dev);
		}

		/* Set the device vertex */
		/* NOTE(review): kmalloc return is not checked for NULL */
		device_sysdata = kmalloc(sizeof(struct sn_device_sysdata), GFP_KERNEL);
		device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
		device_sysdata->isa64 = 0;
		/*
		 * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
		 * register addresses.
		 */
		(void) set_flush_addresses(device_dev, device_sysdata);

		device_dev->sysdata = (void *) device_sysdata;
		set_sn_pci64(device_dev);
		pci_read_config_word(device_dev, PCI_COMMAND, &cmd);

		/*
		 * Set the resources address correctly. The assumption here
		 * is that the addresses in the resource structure has been
		 * read from the card and it was set in the card by our
		 * Infrastructure ..
		 */
		vhdl = device_sysdata->vhdl;
		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
			size = 0;
			size = device_dev->resource[idx].end -
				device_dev->resource[idx].start;
			if (size) {
				/* Translate BAR window into a CPU-accessible
				 * PIO address in uncached space. */
				device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, PCIIO_BYTE_STREAM);
				device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
			}
			else
				continue;

			device_dev->resource[idx].end =
				device_dev->resource[idx].start + size;

#ifdef CONFIG_IA64_SGI_SN1
			/*
			 * Adjust the addresses to go to the SWIZZLE ..
			 */
			device_dev->resource[idx].start =
				device_dev->resource[idx].start & 0xfffff7ffffffffff;
			device_dev->resource[idx].end =
				device_dev->resource[idx].end & 0xfffff7ffffffffff;
#endif

			if (device_dev->resource[idx].flags & IORESOURCE_IO) {
				cmd |= PCI_COMMAND_IO;
#ifdef SN_IOPORTS
				/* NOTE(review): ioport is unsigned long, so
				 * "ioport < 0" can never be true; also "res"
				 * is not declared in this function — this
				 * path likely does not compile as written. */
				ioport = sn_allocate_ioports(device_dev->resource[idx].start);
				if (ioport < 0) {
					printk("sn_pci_fixup: PCI Device 0x%x on PCI Bus %d not mapped to IO PORTs .. IO PORTs exhausted\n", device_dev->devfn, device_dev->bus->number);
					continue;
				}
				pciio_config_set(vhdl, (unsigned) PCI_BASE_ADDRESS_0 + (idx * 4), 4, (res + (ioport & 0xfff)));

				printk("sn_pci_fixup: ioport number %d mapped to pci address 0x%lx\n", ioport, (res + (ioport & 0xfff)));

				device_dev->resource[idx].start = ioport;
				device_dev->resource[idx].end = ioport + SN_IOPORTS_UNIT;
#endif
			}
			if (device_dev->resource[idx].flags & IORESOURCE_MEM)
				cmd |= PCI_COMMAND_MEMORY;
		}

		/*
		 * Now handle the ROM resource ..
		 */
		size = device_dev->resource[PCI_ROM_RESOURCE].end -
			device_dev->resource[PCI_ROM_RESOURCE].start;
		if (size) {
			device_dev->resource[PCI_ROM_RESOURCE].start =
			(unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
				size, 0, PCIIO_BYTE_STREAM);
			device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
			device_dev->resource[PCI_ROM_RESOURCE].end =
				device_dev->resource[PCI_ROM_RESOURCE].start + size;

#ifdef CONFIG_IA64_SGI_SN1
			/*
			 * go through synergy swizzled space
			 */
			device_dev->resource[PCI_ROM_RESOURCE].start &= 0xfffff7ffffffffffUL;
			device_dev->resource[PCI_ROM_RESOURCE].end   &= 0xfffff7ffffffffffUL;
#endif
		}

		/*
		 * Update the Command Word on the Card.
		 */
		cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
					   /* bit gets dropped .. no harm */
		pci_write_config_word(device_dev, PCI_COMMAND, cmd);

		pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
		/* IOC3 always reports interrupt line A (1). */
		if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
		    device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
			lines = 1;
		}

		device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
		device_vertex = device_sysdata->vhdl;

		/* Allocate and connect the interrupt; encode the target
		 * CPU in the high byte of the Linux irq number. */
		intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);

		bit = intr_handle->pi_irq;
		cpuid = intr_handle->pi_cpu;
#ifdef CONFIG_IA64_SGI_SN1
		irq = bit_pos_to_irq(bit);
#else /* SN2 */
		irq = bit;
#endif
		irq = irq + (cpuid << 8);
		pciio_intr_connect(intr_handle);
		device_dev->irq = irq;

#ifdef ajmtestintr
		/* Interrupt test scaffolding; compiled in only when
		 * "ajmtestintr" is defined. */
		{
			int slot = PCI_SLOT(device_dev->devfn);
			static int timer_set = 0;
			pcibr_intr_t	pcibr_intr = (pcibr_intr_t)intr_handle;
			pcibr_soft_t	pcibr_soft = pcibr_intr->bi_soft;
			extern void intr_test_handle_intr(int, void*, struct pt_regs *);

			if (!timer_set) {
				intr_test_set_timer();
				timer_set = 1;
			}
			intr_test_register_irq(irq, pcibr_soft, slot);
			request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
		}
#endif
	}

#if 0
	/* Disabled debug dump of bridge device registers and big-window
	 * test reads. */
	{
		devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
		pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
		bridge_t *bridge = pcibr_soft->bs_base;
		printk("pci_fixup_ioc3: Before devreg fixup\n");
		printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
		printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
		printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
		printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
		printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
		printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
		printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
		printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
	}

	printk("testing Big Window: 0xC0000200c0000000 %p\n", *( (volatile uint64_t *)0xc0000200a0000000));
	printk("testing Big Window: 0xC0000200c0000008 %p\n", *( (volatile uint64_t *)0xc0000200a0000008));
#endif
}