/*
 * Write 'bytes' bytes of 'data' to PCI configuration space through the
 * SAL firmware interface.  The SAL completion status is deliberately
 * not examined: configuration writes are fire-and-forget here.
 */
void
pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data,
    int bytes)
{
	struct ia64_sal_result r;

	r = ia64_sal_entry(SAL_PCI_CONFIG_WRITE,
	    SAL_PCI_ADDRESS(bus, slot, func, reg), bytes, data, 0, 0, 0, 0);
}
/*
 * One-time Machine Check Architecture (MCA) initialization.
 *
 * Queries SAL for the size of every error-record type, allocates a
 * single physically-contiguous scratch buffer big enough for the
 * largest one, initializes the spin lock that protects it, and then
 * saves any error records SAL is already holding for the BSP.
 *
 * Side effects: fills in mca_info_size[], mca_info_block and
 * mca_info_block_lock (all file-scope state used by
 * ia64_mca_save_state()).
 */
void
ia64_mca_init(void)
{
	struct ia64_sal_result result;
	uint64_t max_size;
	char *p;
	int i;

	/*
	 * Get the sizes of the state information we can get from SAL and
	 * allocate a common block (forgive me my Fortran :-) for use by
	 * support functions. We create a region 7 address to make it
	 * easy on the OS_MCA or OS_INIT handlers to get the state info
	 * under unreliable conditions.
	 */
	max_size = 0;
	for (i = 0; i < SAL_INFO_TYPES; i++) {
		result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0,
		    0, 0, 0, 0);
		if (result.sal_status == 0) {
			mca_info_size[i] = result.sal_result[0];
			if (mca_info_size[i] > max_size)
				max_size = mca_info_size[i];
		} else
			/* -1 marks this type as unavailable; checked later
			 * by ia64_mca_save_state(). */
			mca_info_size[i] = -1;
	}
	/* Round up so the region-7 mapping covers whole pages. */
	max_size = round_page(max_size);
	/*
	 * NOTE(review): allocation is constrained to the low 256MB with
	 * 256MB alignment boundary — presumably so the block never
	 * crosses a large-page/region boundary; confirm against the
	 * platform requirements for MCA handlers.
	 */
	p = contigmalloc(max_size, M_TEMP, M_WAITOK, 0ul,
	    256*1024*1024 - 1, PAGE_SIZE, 256*1024*1024);
	/* Convert to a region-7 (cacheable, identity-mapped) address so
	 * the handlers can reach it without relying on the page tables. */
	mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p));
	if (bootverbose)
		printf("MCA: allocated %ld bytes for state information\n",
		    max_size);
	/*
	 * Initialize the spin lock used to protect the info block. When APs
	 * get launched, there's a short moment of contention, but in all other
	 * cases it's not a hot spot. I think it's possible to have the MCA
	 * handler be called on multiple processors at the same time, but that
	 * should be rare. On top of that, performance is not an issue when
	 * dealing with machine checks...
	 */
	mtx_init(&mca_info_block_lock, "MCA spin lock", NULL, MTX_SPIN);
	/*
	 * Get and save any processor and platfom error records. Note that in
	 * a SMP configuration the processor records are for the BSP only. We
	 * let the APs get and save their own records when we wake them up.
	 */
	for (i = 0; i < SAL_INFO_TYPES; i++)
		ia64_mca_save_state(i);
}
u_int32_t pci_cfgregread(int bus, int slot, int func, int reg, int bytes) { struct ia64_sal_result res; res = ia64_sal_entry(SAL_PCI_CONFIG_READ, SAL_PCI_ADDRESS(bus, slot, func, reg), bytes, 0, 0, 0, 0, 0); if (res.sal_status < 0) return (~0); else return (res.sal_result[0]); }
/*
 * machdep PCI configuration write: combine the bus tag with the
 * register offset and hand the write to SAL.  Offsets outside the
 * configuration space are silently ignored; a SAL failure is only
 * logged.
 */
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t val)
{
	struct ia64_sal_result r;

	/* Reject out-of-range (or negative) register offsets. */
	if ((unsigned int)reg < PCI_CONF_SIZE) {
		r = ia64_sal_entry(SAL_PCI_CONFIG_WRITE, tag | reg,
		    sizeof(pcireg_t), val, 0, 0, 0, 0);
		if (r.sal_status < 0)
			printf("pci configuration write failed\n");
	}
}
/*
 * machdep PCI configuration read: combine the bus tag with the
 * register offset and ask SAL for the value.  Returns -1 (all-ones)
 * for out-of-range offsets or SAL errors, matching absent-device
 * behavior.
 */
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	struct ia64_sal_result r;

	/* Reject out-of-range (or negative) register offsets. */
	if ((unsigned int)reg >= PCI_CONF_SIZE)
		return -1;
	r = ia64_sal_entry(SAL_PCI_CONFIG_READ, tag | reg,
	    sizeof(pcireg_t), 0, 0, 0, 0, 0);
	return (r.sal_status < 0) ? -1 : r.sal_result[0];
}
/*
 * Write 'len' bytes of 'data' to PCI configuration space via SAL.
 * The address is validated by pci_sal_address() and the access width
 * by pci_valid_access(); invalid requests are silently dropped.
 * Interrupts are disabled across the firmware call.
 */
void
pci_cfgregwrite(int bus, int slot, int func, int reg, uint32_t data, int len)
{
	struct ia64_sal_result r;
	register_t saved_intr;
	u_long addr;

	addr = pci_sal_address(0, bus, slot, func, reg);
	if (addr != ~0ul && pci_valid_access(reg, len)) {
		saved_intr = intr_disable();
		r = ia64_sal_entry(SAL_PCI_CONFIG_WRITE, addr, len, data,
		    0, 0, 0, 0);
		intr_restore(saved_intr);
	}
}
uint32_t pci_cfgregread(int bus, int slot, int func, int reg, int len) { struct ia64_sal_result res; register_t is; u_long addr; addr = pci_sal_address(0, bus, slot, func, reg); if (addr == ~0ul) return (~0); if (!pci_valid_access(reg, len)) return (~0); is = intr_disable(); res = ia64_sal_entry(SAL_PCI_CONFIG_READ, addr, len, 0, 0, 0, 0, 0); intr_restore(is); return ((res.sal_status < 0) ? ~0 : res.sal_result[0]); }
/*
 * Drain all pending SAL error records of the given type: copy each
 * record out of the shared info block into freshly-allocated memory,
 * publish it as a dynamic sysctl node under hw.mca (named after the
 * record's sequence number), update the mca_first/mca_last/mca_count
 * bookkeeping, and clear the record in SAL so the next one becomes
 * visible.  Loops until SAL reports no more records.
 *
 * The info block is shared between CPUs and is only valid while
 * mca_info_block_lock is held; the lock is dropped around malloc()
 * (which may sleep), which is what makes the re-check dance below
 * necessary.
 */
void
ia64_mca_save_state(int type)
{
	struct ia64_sal_result result;
	struct mca_record_header *hdr;
	struct sysctl_oid *oidp;
	char *name, *state;
	uint64_t seqnr;
	size_t recsz, totsz;

	/*
	 * Don't try to get the state if we couldn't get the size of
	 * the state information previously.
	 */
	if (mca_info_size[type] == -1)
		return;

	while (1) {
		mtx_lock_spin(&mca_info_block_lock);
		/* Ask SAL to deposit the next record in the info block. */
		result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
		    mca_info_block, 0, 0, 0, 0);
		if (result.sal_status < 0) {
			/* No (more) records of this type: we're done. */
			mtx_unlock_spin(&mca_info_block_lock);
			return;
		}

		/* Snapshot size and sequence number before dropping the
		 * lock; the block contents may change once it's released. */
		hdr = (struct mca_record_header *)mca_info_block;
		recsz = hdr->rh_length;
		seqnr = hdr->rh_seqnr;

		mtx_unlock_spin(&mca_info_block_lock);

		/* One allocation holds the oid, the record copy and the
		 * sysctl name (up to 32 bytes), laid out back to back. */
		totsz = sizeof(struct sysctl_oid) + recsz + 32;
		oidp = malloc(totsz, M_MCA, M_WAITOK|M_ZERO);
		state = (char*)(oidp + 1);
		name = state + recsz;
		/* The sysctl node is named after the record's seqnr. */
		sprintf(name, "%lld", (long long)seqnr);

		mtx_lock_spin(&mca_info_block_lock);

		/*
		 * If the info block doesn't have our record anymore because
		 * we temporarily unlocked it, get it again from SAL. I assume
		 * that it's possible that we could get a different record.
		 * I expect this to happen in a SMP configuration where the
		 * record has been cleared by a different processor. So, if
		 * we get a different record we simply abort with this record
		 * and start over.
		 */
		if (seqnr != hdr->rh_seqnr) {
			/*
			 * NOTE(review): the re-fetch status is not checked
			 * and a record that matches seqnr but has a larger
			 * rh_length than the earlier 'recsz' would overrun
			 * the copy below — presumably a same-seqnr record is
			 * byte-identical; confirm against the SAL spec.
			 */
			result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0,
			    mca_info_block, 0, 0, 0, 0);
			if (seqnr != hdr->rh_seqnr) {
				mtx_unlock_spin(&mca_info_block_lock);
				free(oidp, M_MCA);
				continue;
			}
		}

		bcopy((char*)mca_info_block, state, recsz);

		/* Hand-construct the dynamic sysctl node in the memory we
		 * allocated above and register it under hw.mca. */
		oidp->oid_parent = &sysctl__hw_mca_children;
		oidp->oid_number = OID_AUTO;
		oidp->oid_kind = CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_DYN;
		oidp->oid_arg1 = state;
		oidp->oid_arg2 = recsz;
		oidp->oid_name = name;
		oidp->oid_handler = mca_sysctl_handler;
		oidp->oid_fmt = "S,MCA";
		oidp->descr = "Error record";
		sysctl_register_oid(oidp);

		/* Track the [first, last] seqnr range and record count. */
		if (mca_count > 0) {
			if (seqnr < mca_first)
				mca_first = seqnr;
			else if (seqnr > mca_last)
				mca_last = seqnr;
		} else
			mca_first = mca_last = seqnr;

		mca_count++;

		/*
		 * Clear the state so that we get any other records when
		 * they exist.
		 */
		result = ia64_sal_entry(SAL_CLEAR_STATE_INFO, type, 0, 0,
		    0, 0, 0, 0);
		mtx_unlock_spin(&mca_info_block_lock);
	}
}
/*
 * Locate and parse the SAL System Table (found through the EFI
 * configuration tables).  Walks the variable-sized descriptor entries
 * that follow the table header, picking out:
 *   - type 0 (entrypoint descriptor): records the PAL and SAL
 *     procedure entry points and the SAL gp, wiring up the global
 *     ia64_sal_entry/ia64_pal_entry call vectors;
 *   - type 5 (AP wake-up descriptor): validates the wake-up vector,
 *     programs the IPI vector layout around it and, on SMP, registers
 *     the OS boot rendezvous address with SAL.
 * Falls back to the default 0xf0 IPI vector block when no usable
 * wake-up descriptor is found.
 */
void
ia64_sal_init(void)
{
	/* Byte sizes of SAL descriptor types 0..5; used to step through
	 * the table.  NOTE(review): entries of types not handled below
	 * are skipped by size only — assumes *p is always in [0,5]. */
	static int sizes[6] = {
		48, 32, 16, 32, 16, 16
	};
	u_int8_t *p;
	int i;

	sal_systbl = efi_get_table(&sal_table);
	if (sal_systbl == NULL)
		return;

	if (memcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) {
		printf("Bad signature for SAL System Table\n");
		return;
	}

	/* Descriptor entries start immediately after the header; the
	 * first byte of each entry is its type. */
	p = (u_int8_t *) (sal_systbl + 1);
	for (i = 0; i < sal_systbl->sal_entry_count; i++) {
		switch (*p) {
		case 0: {	/* Entrypoint descriptor. */
			struct sal_entrypoint_descriptor *dp;

			dp = (struct sal_entrypoint_descriptor*)p;
			/* Physical addresses from the table are turned into
			 * region-7 (identity-mapped) kernel addresses. */
			ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc);
			if (bootverbose)
				printf("PAL Proc at 0x%lx\n", ia64_pal_entry);
			sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc);
			sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp);
			if (bootverbose)
				printf("SAL Proc at 0x%lx, GP at 0x%lx\n",
				    sal_fdesc.func, sal_fdesc.gp);
			/* ia64 indirect calls go through a function
			 * descriptor (entry point + gp pair). */
			ia64_sal_entry = (sal_entry_t *) &sal_fdesc;
			break;
		}
		case 5: {	/* AP wake-up descriptor. */
			struct sal_ap_wakeup_descriptor *dp;
#ifdef SMP
			struct ia64_sal_result result;
			struct ia64_fdesc *fd;
#endif

			dp = (struct sal_ap_wakeup_descriptor*)p;
			/* Mechanism 0 is the only one supported here
			 * (external interrupt wake-up). */
			if (dp->sale_mechanism != 0) {
				printf("SAL: unsupported AP wake-up mechanism "
				    "(%d)\n", dp->sale_mechanism);
				break;
			}
			if (dp->sale_vector < 0x10 || dp->sale_vector > 0xff) {
				printf("SAL: invalid AP wake-up vector "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			/*
			 * SAL documents that the wake-up vector should be
			 * high (close to 255). The MCA rendezvous vector
			 * should be less than the wake-up vector, but still
			 * "high". We use the following priority assignment:
			 *	Wake-up:	priority of the sale_vector
			 *	Rendezvous:	priority-1
			 *	Generic IPIs:	priority-2
			 *	Special IPIs:	priority-3
			 * Consequently, the wake-up priority should be at
			 * least 4 (ie vector >= 0x40).
			 */
			if (dp->sale_vector < 0x40) {
				printf("SAL: AP wake-up vector too low "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			if (bootverbose)
				printf("SAL: AP wake-up vector: 0x%lx\n",
				    dp->sale_vector);

			ipi_vector[IPI_AP_WAKEUP] = dp->sale_vector;
			/* Lay out the other IPI vectors in the same
			 * 16-vector priority block as the wake-up vector. */
			setup_ipi_vectors(dp->sale_vector & 0xf0);

#ifdef SMP
			/* Tell SAL where APs should rendezvous at boot;
			 * SAL needs physical addresses, hence ia64_tpa(). */
			fd = (struct ia64_fdesc *) os_boot_rendez;
			result = ia64_sal_entry(SAL_SET_VECTORS,
			    SAL_OS_BOOT_RENDEZ, ia64_tpa(fd->func),
			    ia64_tpa(fd->gp), 0, 0, 0, 0);
#endif
			break;
		}
		}
		p += sizes[*p];
	}

	/* No usable wake-up descriptor: fall back to the default block. */
	if (ipi_vector[IPI_AP_WAKEUP] == 0)
		setup_ipi_vectors(0xf0);
}