/*
 * Locate the ACPI table with the given 4-character signature by walking
 * the XSDT.  Returns a region-7 (cacheable, direct-mapped) virtual
 * pointer to the table header, or NULL if no table with that signature
 * and a valid checksum exists.
 */
void *
acpi_find_table(const char *sig)
{
	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_XSDT *xsdt;
	ACPI_TABLE_HEADER *table;
	UINT64 addr;
	u_int i, count;

	/* No RSDP means no ACPI at all. */
	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
		return (NULL);

	/* Map the physical RSDP and XSDT through region 7. */
	rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr);
	xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress);

	/*
	 * The number of table pointers is the number of UINT64 slots
	 * between the end of the XSDT header and the end of the XSDT.
	 */
	count = (UINT64 *)((char *)xsdt + xsdt->Header.Length) -
	    xsdt->TableOffsetEntry;

	for (i = 0; i < count; i++) {
		addr = xsdt->TableOffsetEntry[i];
		table = (ACPI_TABLE_HEADER *)IA64_PHYS_TO_RR7(addr);

		/* Skip tables with a different signature. */
		if (strncmp(table->Signature, sig, ACPI_NAME_SIZE) != 0)
			continue;
		/* Skip tables that fail their checksum. */
		if (ACPI_FAILURE(AcpiTbChecksum((void *)table, table->Length)))
			continue;

		return (table);
	}

	return (NULL);
}
/*
 * UMA small-allocation backend: hand out one wired page per request,
 * mapped through region 7 so no pmap work is needed.  The static color
 * round-robins the pindex hint passed to the page allocator.
 */
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_pindex_t color;
	vm_page_t pg;
	void *mem;
	int req;

	*flags = UMA_SLAB_PRIV;

	/* Translate malloc(9) wait flags into vm_page_alloc() flags. */
	req = VM_ALLOC_WIRED;
	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		req |= VM_ALLOC_INTERRUPT;
	else
		req |= VM_ALLOC_SYSTEM;
	if (wait & M_ZERO)
		req |= VM_ALLOC_ZERO;

	/* Retry (sleeping for free pages) unless the caller can't wait. */
	while ((pg = vm_page_alloc(NULL, color++, req | VM_ALLOC_NOOBJ)) ==
	    NULL) {
		if (wait & M_NOWAIT)
			return (NULL);
		VM_WAIT;
	}

	mem = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(pg));
	/* Only zero by hand if the allocator didn't give us a zeroed page. */
	if ((wait & M_ZERO) && (pg->flags & PG_ZERO) == 0)
		bzero(mem, PAGE_SIZE);
	return (mem);
}
/*
 * UMA small-allocation backend: allocate a single wired page and return
 * its region-7 (direct-mapped) address, so no pmap_enter() is required.
 */
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_page_t pg;
	void *mem;
	int req;

	*flags = UMA_SLAB_PRIV;
	/* Derive page-allocation flags from the malloc(9) wait flags. */
	req = malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;

	/* Retry (sleeping for free pages) unless the caller can't wait. */
	while ((pg = vm_page_alloc(NULL, 0, req)) == NULL) {
		if (wait & M_NOWAIT)
			return (NULL);
		VM_WAIT;
	}

	mem = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(pg));
	/* Only zero by hand if the allocator didn't give us a zeroed page. */
	if ((wait & M_ZERO) && (pg->flags & PG_ZERO) == 0)
		bzero(mem, PAGE_SIZE);
	return (mem);
}
/* * Count the number of local SAPIC entries in the APIC table. Every enabled * entry corresponds to a processor. */ int ia64_count_cpus(void) { ACPI_PHYSICAL_ADDRESS rsdp_ptr; ACPI_MADT_LOCAL_SAPIC *entry; ACPI_TABLE_MADT *table; ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_XSDT *xsdt; char *end, *p; int cpus, t, tables; if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0) return (0); rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr); xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress); tables = (UINT64 *)((char *)xsdt + xsdt->Header.Length) - xsdt->TableOffsetEntry; cpus = 0; for (t = 0; t < tables; t++) { table = (ACPI_TABLE_MADT *) IA64_PHYS_TO_RR7(xsdt->TableOffsetEntry[t]); if (strncmp(table->Header.Signature, ACPI_SIG_MADT, ACPI_NAME_SIZE) != 0 || ACPI_FAILURE(AcpiTbChecksum((void *)table, table->Header.Length))) continue; end = (char *)table + table->Header.Length; p = (char *)(table + 1); while (p < end) { entry = (ACPI_MADT_LOCAL_SAPIC *)p; if (entry->Header.Type == ACPI_MADT_TYPE_LOCAL_SAPIC && (entry->LapicFlags & ACPI_MADT_ENABLED)) cpus++; p += entry->Header.Length; } } return (cpus); }
/*
 * Return a region-7 pointer to the first EFI memory descriptor, or
 * NULL when the boot loader didn't pass us a memory map.
 */
struct efi_md *
efi_md_first(void)
{
	if (bootinfo.bi_memmap != 0)
		return ((struct efi_md *)IA64_PHYS_TO_RR7(bootinfo.bi_memmap));
	return (NULL);
}
/*
 * Advance to the next EFI memory descriptor.  Descriptors are
 * bi_memdesc_size bytes apart (which need not equal sizeof(struct
 * efi_md)).  Returns NULL once the end of the map is reached.
 */
struct efi_md *
efi_md_next(struct efi_md *md)
{
	uint64_t end;

	end = IA64_PHYS_TO_RR7(bootinfo.bi_memmap + bootinfo.bi_memmap_size);
	md = (struct efi_md *)((uintptr_t)md + bootinfo.bi_memdesc_size);
	if (md >= (struct efi_md *)end)
		return (NULL);
	return (md);
}
/*
 * Set up machine-check state saving: size and allocate the common info
 * block, initialize its spin lock, and save the BSP's initial records.
 */
void
ia64_mca_init(void)
{
	struct ia64_sal_result result;
	uint64_t max_size;
	char *p;
	int i;

	/*
	 * Get the sizes of the state information we can get from SAL and
	 * allocate a common block (forgive me my Fortran :-) for use by
	 * support functions. We create a region 7 address to make it
	 * easy on the OS_MCA or OS_INIT handlers to get the state info
	 * under unreliable conditions.
	 */
	max_size = 0;
	for (i = 0; i < SAL_INFO_TYPES; i++) {
		result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0, 0,
		    0, 0, 0);
		if (result.sal_status == 0) {
			mca_info_size[i] = result.sal_result[0];
			if (mca_info_size[i] > max_size)
				max_size = mca_info_size[i];
		} else
			/* -1 marks info types SAL can't report on. */
			mca_info_size[i] = -1;
	}
	max_size = round_page(max_size);

	/*
	 * NOTE(review): contigmalloc() can fail even with M_WAITOK when no
	 * contiguous range below 256MB is available; p is used unchecked
	 * here -- confirm whether a NULL check/panic is warranted.
	 */
	p = contigmalloc(max_size, M_TEMP, M_WAITOK, 0ul, 256*1024*1024 - 1,
	    PAGE_SIZE, 256*1024*1024);
	mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p));

	if (bootverbose)
		printf("MCA: allocated %ld bytes for state information\n",
		    max_size);

	/*
	 * Initialize the spin lock used to protect the info block. When APs
	 * get launched, there's a short moment of contention, but in all
	 * other cases it's not a hot spot. I think it's possible to have
	 * the MCA handler be called on multiple processors at the same time,
	 * but that should be rare. On top of that, performance is not an
	 * issue when dealing with machine checks...
	 */
	mtx_init(&mca_info_block_lock, "MCA spin lock", NULL, MTX_SPIN);

	/*
	 * Get and save any processor and platform error records. Note that
	 * in a SMP configuration the processor records are for the BSP only.
	 * We let the APs get and save their own records when we wake them up.
	 */
	for (i = 0; i < SAL_INFO_TYPES; i++)
		ia64_mca_save_state(i);
}
/* * Collect the entry points for PAL and SAL. Be extra careful about NULL * pointer values. We're running pre-console, so it's better to return * error values than to cause panics, machine checks and other traps and * faults. Keep this minimal... */ int efi_boot_minimal(uint64_t systbl) { struct efi_md *md; efi_status status; if (systbl == 0) return (EINVAL); efi_systbl = (struct efi_systbl *)IA64_PHYS_TO_RR7(systbl); if (efi_systbl->st_hdr.th_sig != EFI_SYSTBL_SIG) { efi_systbl = NULL; return (EFAULT); } efi_cfgtbl = (efi_systbl->st_cfgtbl == 0) ? NULL : (struct efi_cfgtbl *)IA64_PHYS_TO_RR7(efi_systbl->st_cfgtbl); if (efi_cfgtbl == NULL) return (ENOENT); efi_runtime = (efi_systbl->st_rt == 0) ? NULL : (struct efi_rt *)IA64_PHYS_TO_RR7(efi_systbl->st_rt); if (efi_runtime == NULL) return (ENOENT); /* * Relocate runtime memory segments for firmware. */ md = efi_md_first(); while (md != NULL) { if (md->md_attr & EFI_MD_ATTR_RT) { if (md->md_attr & EFI_MD_ATTR_WB) md->md_virt = (void *)IA64_PHYS_TO_RR7(md->md_phys); else if (md->md_attr & EFI_MD_ATTR_UC) md->md_virt = pmap_mapdev(md->md_phys, md->md_pages * EFI_PAGE_SIZE); } md = efi_md_next(md); } status = ia64_call_efi_physical((uint64_t)efi_runtime->rt_setvirtual, bootinfo.bi_memmap_size, bootinfo.bi_memdesc_size, bootinfo.bi_memdesc_version, bootinfo.bi_memmap, 0); return ((status < 0) ? EFAULT : 0); }
/*
 * Look up an EFI configuration-table entry by UUID.  Returns a
 * region-7 pointer to the entry's data, or NULL when the table is
 * absent or no entry matches.
 */
void *
efi_get_table(struct uuid *uuid)
{
	struct efi_cfgtbl *ent;
	u_long i, n;

	if (efi_cfgtbl == NULL)
		return (NULL);
	n = efi_systbl->st_entries;
	ent = efi_cfgtbl;
	for (i = 0; i < n; i++, ent++) {
		if (bcmp(&ent->ct_uuid, uuid, sizeof(*uuid)) == 0)
			return ((void *)IA64_PHYS_TO_RR7(ent->ct_data));
	}
	return (NULL);
}
/*
 * Translate a physical offset into a kernel virtual pointer for /dev/mem
 * style access.  Cacheable (WB) memory maps through region 7,
 * everything else through the uncacheable region 6.  On success *ptr is
 * the virtual address and *limit the number of bytes remaining in the
 * descriptor.  Returns 0 or an errno value.
 */
static int
mem_phys2virt(vm_offset_t offset, int prot, void **ptr, u_long *limit)
{
	struct efi_md *md;

	/* Only read/write access makes sense here. */
	if ((prot & ~(VM_PROT_READ | VM_PROT_WRITE)) != 0)
		return (EPERM);

	md = efi_md_find(offset);
	if (md == NULL)
		return (EFAULT);
	if (md->md_type == EFI_MD_TYPE_BAD)
		return (EIO);

	if (md->md_attr & EFI_MD_ATTR_WB)
		*ptr = (void *)IA64_PHYS_TO_RR7(offset);
	else
		*ptr = (void *)IA64_PHYS_TO_RR6(offset);
	*limit = (md->md_pages * EFI_PAGE_SIZE) - (offset - md->md_phys);
	return (0);
}
/*
 * Allow user processes to MMAP some memory sections instead of going
 * through read/write.
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	/*
	 * /dev/mem is the only one that makes sense through this
	 * interface. For /dev/kmem any physaddr we return here
	 * could be transient and hence incorrect or invalid at
	 * a later time.
	 */
	if (dev2unit(dev) != CDEV_MINOR_MEM)
		return (-1);

	/* Allow access only in RAM. */
	if ((prot & ia64_pa_access(atop((vm_offset_t)offset))) != prot)
		return (-1);

	*paddr = IA64_PHYS_TO_RR7(offset);
	return (0);
}
void ia64_sal_init(void) { static int sizes[6] = { 48, 32, 16, 32, 16, 16 }; u_int8_t *p; int error, i; sal_systbl = efi_get_table(&sal_table); if (sal_systbl == NULL) return; if (bcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) { printf("Bad signature for SAL System Table\n"); return; } p = (u_int8_t *) (sal_systbl + 1); for (i = 0; i < sal_systbl->sal_entry_count; i++) { switch (*p) { case 0: { struct sal_entrypoint_descriptor *dp; dp = (struct sal_entrypoint_descriptor*)p; ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc); if (bootverbose) printf("PAL Proc at 0x%lx\n", ia64_pal_entry); sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc); sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp); if (bootverbose) printf("SAL Proc at 0x%lx, GP at 0x%lx\n", sal_fdesc.func, sal_fdesc.gp); ia64_sal_entry = (sal_entry_t *) &sal_fdesc; break; } case 5: { struct sal_ap_wakeup_descriptor *dp; dp = (struct sal_ap_wakeup_descriptor*)p; if (dp->sale_mechanism != 0) { printf("SAL: unsupported AP wake-up mechanism " "(%d)\n", dp->sale_mechanism); break; } /* Reserve the XIV so that we won't use it. */ error = ia64_xiv_reserve(dp->sale_vector, IA64_XIV_PLAT, NULL); if (error) { printf("SAL: invalid AP wake-up XIV (%#lx)\n", dp->sale_vector); break; } ia64_ipi_wakeup = dp->sale_vector; if (bootverbose) printf("SAL: AP wake-up XIV: %#x\n", ia64_ipi_wakeup); break; } } p += sizes[*p]; } }
/*
 * Walk the ACPI XSDT looking for MADTs and use them to discover the
 * interrupt hardware: record the local APIC address, create I/O SAPICs,
 * and (with SMP) register each enabled local SAPIC as a CPU.
 */
void
ia64_probe_sapics(void)
{
	ACPI_PHYSICAL_ADDRESS rsdp_ptr;
	ACPI_SUBTABLE_HEADER *entry;
	ACPI_TABLE_MADT *table;
	ACPI_TABLE_RSDP *rsdp;
	ACPI_TABLE_XSDT *xsdt;
	char *end, *p;
	int t, tables;

	if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0)
		return;

	rsdp = (ACPI_TABLE_RSDP *)IA64_PHYS_TO_RR7(rsdp_ptr);
	xsdt = (ACPI_TABLE_XSDT *)IA64_PHYS_TO_RR7(rsdp->XsdtPhysicalAddress);

	/* Number of 64-bit table pointers following the XSDT header. */
	tables = (UINT64 *)((char *)xsdt + xsdt->Header.Length) -
	    xsdt->TableOffsetEntry;

	for (t = 0; t < tables; t++) {
		table = (ACPI_TABLE_MADT *)
		    IA64_PHYS_TO_RR7(xsdt->TableOffsetEntry[t]);

		if (bootverbose)
			printf("Table '%c%c%c%c' at %p\n",
			    table->Header.Signature[0],
			    table->Header.Signature[1],
			    table->Header.Signature[2],
			    table->Header.Signature[3], table);

		/* Only process MADTs that pass their checksum. */
		if (strncmp(table->Header.Signature, ACPI_SIG_MADT,
		    ACPI_NAME_SIZE) != 0 ||
		    ACPI_FAILURE(AcpiTbChecksum((void *)table,
		    table->Header.Length)))
			continue;

		/* Save the address of the processor interrupt block. */
		if (bootverbose)
			printf("\tLocal APIC address=0x%x\n", table->Address);
		ia64_lapic_addr = table->Address;

		/* Walk the variable-length subtables after the header. */
		end = (char *)table + table->Header.Length;
		p = (char *)(table + 1);
		while (p < end) {
			entry = (ACPI_SUBTABLE_HEADER *)p;

			if (bootverbose)
				print_entry(entry);

			switch (entry->Type) {
			case ACPI_MADT_TYPE_IO_SAPIC: {
				ACPI_MADT_IO_SAPIC *sapic =
				    (ACPI_MADT_IO_SAPIC *)entry;
				sapic_create(sapic->Id, sapic->GlobalIrqBase,
				    sapic->Address);
				break;
			}
			case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE: {
				/* A 64-bit override beats table->Address. */
				ACPI_MADT_LOCAL_APIC_OVERRIDE *lapic =
				    (ACPI_MADT_LOCAL_APIC_OVERRIDE *)entry;
				ia64_lapic_addr = lapic->Address;
				break;
			}
#ifdef SMP
			case ACPI_MADT_TYPE_LOCAL_SAPIC: {
				/* Each enabled local SAPIC is a CPU. */
				ACPI_MADT_LOCAL_SAPIC *sapic =
				    (ACPI_MADT_LOCAL_SAPIC *)entry;
				if (sapic->LapicFlags & ACPI_MADT_ENABLED)
					cpu_mp_add(sapic->ProcessorId,
					    sapic->Id, sapic->Eid);
				break;
			}
#endif
			default:
				break;
			}

			p += entry->Length;
		}
	}
}
/*
 * Allocate 'len' bytes of physical memory, aligned to 'align', from the
 * phys_avail[] segment list and return a zeroed region-7 pointer to it.
 * Returns NULL when no segment can satisfy the request.
 *
 * NOTE(review): assumes 'align' is a power of two (the (align - 1) mask
 * arithmetic relies on it) -- confirm callers guarantee this.
 */
void *
ia64_physmem_alloc(vm_size_t len, vm_size_t align)
{
	vm_paddr_t base, lim, pa;
	void *ptr;
	u_int idx;

	if (phys_avail_segs == 0)
		return (NULL);

	len = round_page(len);

	/*
	 * Try and allocate with least effort: carve the request off the
	 * end (or start) of a segment when that end happens to be
	 * suitably aligned, shrinking the segment in place.  Segments
	 * are scanned from the last (highest) one down.
	 */
	idx = phys_avail_segs * 2;
	while (idx > 0) {
		idx -= 2;
		base = phys_avail[idx];
		lim = phys_avail[idx + 1];

		if (lim - base < len)
			continue;

		/* First try from the end. */
		pa = lim - len;
		if ((pa & (align - 1)) == 0) {
			if (pa == base)
				/* The segment is consumed entirely. */
				ia64_physmem_remove(idx);
			else
				phys_avail[idx + 1] = pa;
			goto gotit;
		}

		/* Try from the start next. */
		pa = base;
		if ((pa & (align - 1)) == 0) {
			if (pa + len == lim)
				ia64_physmem_remove(idx);
			else
				phys_avail[idx] += len;
			goto gotit;
		}
	}

	/*
	 * Find a good segment and split it up: round the segment base
	 * up to the alignment and let ia64_physmem_delete() punch the
	 * allocated range out of the middle.
	 */
	idx = phys_avail_segs * 2;
	while (idx > 0) {
		idx -= 2;
		base = phys_avail[idx];
		lim = phys_avail[idx + 1];

		pa = (base + align - 1) & ~(align - 1);
		if (pa + len <= lim) {
			ia64_physmem_delete(pa, len);
			goto gotit;
		}
	}

	/* Out of luck. */
	return (NULL);

 gotit:
	/* Hand back zeroed memory through the region-7 direct map. */
	ptr = (void *)IA64_PHYS_TO_RR7(pa);
	bzero(ptr, len);
	return (ptr);
}
/* ARGSUSED */
/*
 * Read/write handler for /dev/mem and /dev/kmem.  Physical memory is
 * accessed through the region-7 direct map one page at a time; kernel
 * virtual memory is accessed directly after checking that the pages are
 * resident and accessible.  Region-6/7 kmem offsets are converted to
 * physical and handled as /dev/mem accesses.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t addr, eaddr, o, v;
	int c, error, rw;

	error = 0;
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovecs. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			/* Allow reads only in RAM. */
			rw = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;
			if ((ia64_pa_access(v) & rw) != rw) {
				error = EFAULT;
				c = 0;
				break;
			}

			/* Transfer at most to the end of the current page. */
			o = uio->uio_offset & PAGE_MASK;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((caddr_t)IA64_PHYS_TO_RR7(v), c, uio);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			/*
			 * Direct-mapped addresses (regions 6 and 7) are
			 * really physical; strip the region bits and use
			 * the /dev/mem path above.
			 */
			if (v >= IA64_RR_BASE(6)) {
				v = IA64_RR_MASK(v);
				goto kmemphys;
			}

			c = min(iov->iov_len, MAXPHYS);

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			}
			if (!kernacc((caddr_t)v, c, (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
/*
 * Parse the SAL System Table: record the PAL and SAL entry points
 * (descriptor type 0) and configure the AP wake-up and IPI vectors from
 * the AP wake-up descriptor (type 5).  Falls back to the default IPI
 * vector base 0xf0 when no usable wake-up descriptor is found.
 */
void
ia64_sal_init(void)
{
	/* Descriptor sizes, in bytes, indexed by descriptor type (0-5). */
	static int sizes[6] = {
		48, 32, 16, 32, 16, 16
	};
	u_int8_t *p;
	int i;

	sal_systbl = efi_get_table(&sal_table);
	if (sal_systbl == NULL)
		return;

	if (memcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) {
		printf("Bad signature for SAL System Table\n");
		return;
	}

	/* Descriptors immediately follow the table header. */
	p = (u_int8_t *) (sal_systbl + 1);
	for (i = 0; i < sal_systbl->sal_entry_count; i++) {
		switch (*p) {
		case 0: {	/* Entrypoint descriptor: PAL/SAL procs. */
			struct sal_entrypoint_descriptor *dp;

			dp = (struct sal_entrypoint_descriptor*)p;
			ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc);
			if (bootverbose)
				printf("PAL Proc at 0x%lx\n", ia64_pal_entry);
			/* SAL is called through a function descriptor. */
			sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc);
			sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp);
			if (bootverbose)
				printf("SAL Proc at 0x%lx, GP at 0x%lx\n",
				    sal_fdesc.func, sal_fdesc.gp);
			ia64_sal_entry = (sal_entry_t *) &sal_fdesc;
			break;
		}
		case 5: {	/* AP wake-up descriptor. */
			struct sal_ap_wakeup_descriptor *dp;
#ifdef SMP
			struct ia64_sal_result result;
			struct ia64_fdesc *fd;
#endif

			dp = (struct sal_ap_wakeup_descriptor*)p;
			if (dp->sale_mechanism != 0) {
				/* Only external interrupts are supported. */
				printf("SAL: unsupported AP wake-up mechanism "
				    "(%d)\n", dp->sale_mechanism);
				break;
			}
			if (dp->sale_vector < 0x10 || dp->sale_vector > 0xff) {
				printf("SAL: invalid AP wake-up vector "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			/*
			 * SAL documents that the wake-up vector should be
			 * high (close to 255). The MCA rendezvous vector
			 * should be less than the wake-up vector, but still
			 * "high". We use the following priority assignment:
			 *	Wake-up:	priority of the sale_vector
			 *	Rendezvous:	priority-1
			 *	Generic IPIs:	priority-2
			 *	Special IPIs:	priority-3
			 * Consequently, the wake-up priority should be at
			 * least 4 (ie vector >= 0x40).
			 */
			if (dp->sale_vector < 0x40) {
				printf("SAL: AP wake-up vector too low "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			if (bootverbose)
				printf("SAL: AP wake-up vector: 0x%lx\n",
				    dp->sale_vector);

			ipi_vector[IPI_AP_WAKEUP] = dp->sale_vector;
			/* Derive the other IPI vectors from its priority. */
			setup_ipi_vectors(dp->sale_vector & 0xf0);

#ifdef SMP
			/*
			 * Tell SAL where the APs should start executing:
			 * the physical address and GP from the rendezvous
			 * function's descriptor.
			 */
			fd = (struct ia64_fdesc *) os_boot_rendez;
			result = ia64_sal_entry(SAL_SET_VECTORS,
			    SAL_OS_BOOT_RENDEZ, ia64_tpa(fd->func),
			    ia64_tpa(fd->gp), 0, 0, 0, 0);
#endif

			break;
		}
		}
		p += sizes[*p];
	}

	/* No wake-up descriptor found: use the default vector base. */
	if (ipi_vector[IPI_AP_WAKEUP] == 0)
		setup_ipi_vectors(0xf0);
}
/*
 * Find the console or debug-port UART.  Prefer the firmware-provided
 * DIG64 HCDP table; fall back to scanning the device hints for units
 * 0-3.  On success fills in 'di' and returns 0, otherwise ENXIO.
 */
int
uart_cpu_getdev(int devtype, struct uart_devinfo *di)
{
	struct dig64_hcdp_table *tbl;
	struct dig64_hcdp_entry *ent;
	bus_addr_t addr;
	unsigned int i, ivar;

	/*
	 * Use the DIG64 HCDP table if present.
	 */
	if (bootinfo.bi_hcdp != 0) {
		tbl = (void*)IA64_PHYS_TO_RR7(bootinfo.bi_hcdp);
		for (i = 0; i < tbl->entries; i++) {
			ent = tbl->entry + i;

			/* Only consider entries of the requested type. */
			if (devtype == UART_DEV_CONSOLE &&
			    ent->type != DIG64_HCDP_CONSOLE)
				continue;
			if (devtype == UART_DEV_DBGPORT &&
			    ent->type != DIG64_HCDP_DBGPORT)
				continue;

			/* Reassemble the 64-bit device address. */
			addr = ent->address.addr_high;
			addr = (addr << 32) + ent->address.addr_low;
			di->ops = uart_ns8250_ops;
			di->bas.chan = 0;
			/* addr_space 0 is memory-mapped, otherwise I/O. */
			di->bas.bst = (ent->address.addr_space == 0)
			    ? IA64_BUS_SPACE_MEM : IA64_BUS_SPACE_IO;
			if (bus_space_map(di->bas.bst, addr, 8, 0,
			    &di->bas.bsh) != 0)
				continue;
			di->bas.regshft = 0;
			/* HCDP reports the clock in units of 16 Hz. */
			di->bas.rclk = ent->pclock << 4;
			/* We don't deal with 64-bit baud rates. */
			di->baudrate = ent->baud_low;
			di->databits = ent->databits;
			di->stopbits = ent->stopbits;
			di->parity = (ent->parity >= 6) ? UART_PARITY_NONE
			    : dig64_to_uart_parity[ent->parity];
			return (0);
		}

		/* FALLTHROUGH */
	}

	/*
	 * Scan the hints for backward compatibility. We only try units
	 * 0 to 3 (inclusive). This covers the ISA legacy where 4 UARTs
	 * had their resources predefined.
	 */
	for (i = 0; i < 4; i++) {
		if (resource_int_value("uart", i, "flags", &ivar))
			continue;
		if (devtype == UART_DEV_CONSOLE && !UART_FLAGS_CONSOLE(ivar))
			continue;
		if (devtype == UART_DEV_DBGPORT && !UART_FLAGS_DBGPORT(ivar))
			continue;
		/*
		 * We have a possible device. Make sure it's enabled and
		 * that we have an I/O port.
		 */
		if (resource_int_value("uart", i, "disabled", &ivar) == 0 &&
		    ivar != 0)
			continue;
		if (resource_int_value("uart", i, "port", &ivar) != 0 ||
		    ivar == 0)
			continue;
		/*
		 * Got it. Fill in the instance and return it. We only
		 * support ns8250 and successors through this legacy path.
		 */
		di->ops = uart_ns8250_ops;
		di->bas.chan = 0;
		di->bas.bst = IA64_BUS_SPACE_IO;
		if (bus_space_map(di->bas.bst, ivar, 8, 0, &di->bas.bsh) != 0)
			continue;
		di->bas.regshft = 0;
		/* rclk 0 means "use the driver's default clock". */
		di->bas.rclk = 0;
		if (resource_int_value("uart", i, "baud", &ivar) != 0)
			ivar = 0;
		di->baudrate = ivar;
		di->databits = 8;
		di->stopbits = 1;
		di->parity = UART_PARITY_NONE;
		return (0);
	}

	return (ENXIO);
}