static pt_entry_t *
prom_lev1map(void)
{
	struct alpha_pcb *apcb;

	/*
	 * Find the level 1 map that we're currently running on.
	 */
	apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG(curpcb);

	return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(apcb->apcb_ptbr << PGSHIFT));
}
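/*
 * For reference: every routine here turns a physical address into a
 * kernel-visible one with ALPHA_PHYS_TO_K0SEG().  A minimal sketch of the
 * idea (the real definitions, likely in <machine/alpha_cpu.h>, may differ)
 * is that K0SEG is the direct-mapped kernel segment, so the translation is
 * just OR-ing in or masking off the segment base.  The EXAMPLE_* names and
 * the base value below are illustrative assumptions, not the real macros.
 */
#if 0	/* illustrative only, not compiled */
#define	EXAMPLE_K0SEG_BASE		0xfffffc0000000000UL	/* assumed base */
#define	EXAMPLE_PHYS_TO_K0SEG(x)	((x) | EXAMPLE_K0SEG_BASE)
#define	EXAMPLE_K0SEG_TO_PHYS(x)	((x) & ~EXAMPLE_K0SEG_BASE)
#endif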
int
jensenio_intio_map(void *v, bus_addr_t ioaddr, bus_size_t iosize, int flags,
    bus_space_handle_t *iohp, int acct)
{
	struct jensenio_config *jcp = v;
	int linear = flags & BUS_SPACE_MAP_LINEAR;
	int error;

	/*
	 * Can't map i/o space linearly.
	 */
	if (linear)
		return (EOPNOTSUPP);

	if (acct) {
#ifdef EXTENT_DEBUG
		printf("intio: allocating 0x%lx to 0x%lx\n", ioaddr,
		    ioaddr + iosize - 1);
#endif
		error = extent_alloc_region(jcp->jc_io_ex, ioaddr, iosize,
		    EX_NOWAIT | (jcp->jc_mallocsafe ? EX_MALLOCOK : 0));
		if (error) {
#ifdef EXTENT_DEBUG
			printf("intio: allocation failed (%d)\n", error);
			extent_print(jcp->jc_io_ex);
#endif
			return (error);
		}
	}

	*iohp = ALPHA_PHYS_TO_K0SEG((ioaddr << 9) + JENSEN_VL82C106);

	return (0);
}
void
__C(CHIP,_mem_unmap)(
	void *v,
	bus_space_handle_t memh,
	bus_size_t memsize,
	int acct)
{
	bus_addr_t memaddr;
	int error;

	if (acct == 0)
		return;

#ifdef EXTENT_DEBUG
	printf("mem: freeing handle 0x%lx for 0x%lx\n", memh, memsize);
#endif

	memaddr = memh - ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v));

#ifdef EXTENT_DEBUG
	printf("mem: freeing 0x%lx to 0x%lx\n", memaddr,
	    memaddr + memsize - 1);
#endif

	error = extent_free(CHIP_MEM_EXTENT(v), memaddr, memsize,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
		printf("%s: WARNING: could not unmap 0x%lx-0x%lx (error %d)\n",
		    __S(__C(CHIP,_mem_unmap)), memaddr,
		    memaddr + memsize - 1, error);
#ifdef EXTENT_DEBUG
		extent_print(CHIP_MEM_EXTENT(v));
#endif
	}
}
int
__C(CHIP,_mem_map)(
	void *v,
	bus_addr_t memaddr,
	bus_size_t memsize,
	int flags,
	bus_space_handle_t *memhp,
	int acct)
{
	int error;

	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("mem: allocating 0x%lx to 0x%lx\n", memaddr,
	    memaddr + memsize - 1);
#endif
	error = extent_alloc_region(CHIP_MEM_EXTENT(v), memaddr, memsize,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("mem: allocation failed (%d)\n", error);
		extent_print(CHIP_MEM_EXTENT(v));
#endif
		return (error);
	}

 mapit:
	*memhp = ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v)) + memaddr;

	return (0);
}
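/*
 * Usage sketch: an MI driver reaches the chipset's _mem_map/_mem_unmap
 * routines above through the generic bus_space interface.  The softc
 * member, addresses, and sizes below are hypothetical; only the
 * bus_space_map/read/unmap calls themselves are the standard API.
 */
#if 0	/* illustrative only, not compiled */
	bus_space_tag_t memt = sc->sc_memt;	/* assumed softc member */
	bus_space_handle_t memh;
	uint32_t val;

	/* Map 4 KB of PCI memory space at an assumed bus address. */
	if (bus_space_map(memt, 0x100000, 0x1000, 0, &memh) == 0) {
		val = bus_space_read_4(memt, memh, 0x40);
		bus_space_unmap(memt, memh, 0x1000);
	}
#endif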
/*
 * Prevent loading a kernel if it would overlap the SRM.
 */
int
check_phdr(void *hdr)
{
	Elf64_Phdr *phdr = (Elf64_Phdr *)hdr;
	struct rpb *hwrpb = (struct rpb *)HWRPB_ADDR;
	struct mddt *mddtp;
	struct mddt_cluster *memc;
	u_int64_t cstart, cend;
	u_int64_t i;

	mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
	for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
		memc = &mddtp->mddt_clusters[i];
		if (memc->mddt_usage & MDDT_PALCODE) {
			cstart = ALPHA_PHYS_TO_K0SEG(ptoa(memc->mddt_pfn));
			cend = cstart + ptoa(memc->mddt_pg_cnt);

			if (phdr->p_vaddr + phdr->p_memsz <= cstart ||
			    phdr->p_vaddr >= cend)
				continue;

			printf("SRM console and kernel image would overlap.\n"
			    "Please report this to <*****@*****.**>, "
			    "with the following values:\n"
			    "SRM range: %p-%p\n"
			    "kernel range: %p-%p\n",
			    cstart, cend, phdr->p_vaddr,
			    phdr->p_vaddr + phdr->p_memsz);
			return 1;
		}
	}

	return 0;
}
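/*
 * The skip condition above is the usual half-open interval overlap test:
 * [p_vaddr, p_vaddr + p_memsz) misses [cstart, cend) only if one range ends
 * before the other starts.  Written as a stand-alone helper (illustrative
 * only, not part of the loader) it would read:
 */
#if 0	/* illustrative only, not compiled */
static int
ranges_overlap(u_int64_t s1, u_int64_t e1, u_int64_t s2, u_int64_t e2)
{
	/* [s1, e1) and [s2, e2) overlap unless one ends before the other starts. */
	return (!(e1 <= s2 || s1 >= e2));
}
#endif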
static pt_entry_t *
rom_lev1map(void)
{
	struct alpha_pcb *apcb;

	/*
	 * We may be called before the first context switch
	 * after alpha_init(), in which case we just need
	 * to use the kernel Lev1map.
	 */
	if (curpcb == 0)
		return (Lev1map);

	/*
	 * Find the level 1 map that we're currently running on.
	 */
	apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG((vm_offset_t)curpcb);

	return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(alpha_ptob(apcb->apcb_ptbr)));
}
pcireg_t
ttwoga_conf_read(void *cpv, pcitag_t tag, int offset)
{
	struct ttwoga_config *tcp = cpv;
	pcireg_t *datap, data;
	int b, d, f, ba;
	paddr_t addr;
	uint64_t old_hae3;

	if ((unsigned int)offset >= PCI_CONF_SIZE)
		return (pcireg_t) -1;

	pci_decompose_tag(&tcp->tc_pc, tag, &b, &d, &f);

	addr = b ? tag : ttwoga_make_type0addr(d, f);
	if (addr == (paddr_t)-1)
		return ((pcireg_t) -1);

	TTWOGA_CONF_LOCK();

	alpha_mb();
	old_hae3 = T2GA(tcp, T2_HAE0_3) & ~HAE0_3_PCA;
	T2GA(tcp, T2_HAE0_3) =
	    old_hae3 | ((b ? 1UL : 0UL) << HAE0_3_PCA_SHIFT);
	alpha_mb();
	alpha_mb();

	datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(tcp->tc_sysmap->tsmap_conf_base |
	    addr << 5UL |			/* address shift */
	    (offset & ~0x03) << 5UL |		/* address shift */
	    0x3 << 3UL);			/* 4-byte, size shift */

	data = (pcireg_t)-1;
	if (!(ba = badaddr(datap, sizeof *datap)))
		data = *datap;

	alpha_mb();
	T2GA(tcp, T2_HAE0_3) = old_hae3;
	alpha_mb();
	alpha_mb();

	alpha_pal_draina();
	alpha_mb();
	alpha_mb();

	TTWOGA_CONF_UNLOCK();

#if 0
	printf("ttwoga_conf_read: tag 0x%lx, reg 0x%x -> 0x%x @ %p%s\n",
	    tag, offset, data, datap, ba ? " (badaddr)" : "");
#endif

	return (data);
}
void
ttwoga_conf_write(void *cpv, pcitag_t tag, int offset, pcireg_t data)
{
	struct ttwoga_config *tcp = cpv;
	pcireg_t *datap;
	int b, d, f;
	paddr_t addr;
	uint64_t old_hae3;

	if ((unsigned int)offset >= PCI_CONF_SIZE)
		return;

	pci_decompose_tag(&tcp->tc_pc, tag, &b, &d, &f);

	addr = b ? tag : ttwoga_make_type0addr(d, f);
	if (addr == (paddr_t)-1)
		return;

	TTWOGA_CONF_LOCK();

	alpha_mb();
	old_hae3 = T2GA(tcp, T2_HAE0_3) & ~HAE0_3_PCA;
	T2GA(tcp, T2_HAE0_3) =
	    old_hae3 | ((b ? 1UL : 0UL) << HAE0_3_PCA_SHIFT);
	alpha_mb();
	alpha_mb();

	datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(tcp->tc_sysmap->tsmap_conf_base |
	    addr << 5UL |			/* address shift */
	    (offset & ~0x03) << 5UL |		/* address shift */
	    0x3 << 3UL);			/* 4-byte, size shift */

	alpha_mb();
	*datap = data;
	alpha_mb();
	alpha_mb();
	alpha_mb();

	T2GA(tcp, T2_HAE0_3) = old_hae3;
	alpha_mb();
	alpha_mb();

	TTWOGA_CONF_UNLOCK();

#if 0
	printf("ttwoga_conf_write: tag 0x%lx, reg 0x%x -> 0x%x @ %p\n",
	    tag, offset, data, datap);
#endif
}
static int
__C(CHIP,_xlate_addr_to_dense_handle)(void *v, bus_addr_t memaddr,
    bus_space_handle_t *memhp)
{
#ifdef CHIP_D_MEM_W1_BUS_START
	if (memaddr >= CHIP_D_MEM_W1_BUS_START(v) &&
	    memaddr <= CHIP_D_MEM_W1_BUS_END(v)) {
		if (memhp != NULL)
			*memhp =
			    ALPHA_PHYS_TO_K0SEG(CHIP_D_MEM_W1_SYS_START(v)) +
			    (memaddr - CHIP_D_MEM_W1_BUS_START(v));
		return (1);
	} else
#endif
	return (0);
}
int
__C(CHIP,_mem_alloc)(
	void *v,
	bus_addr_t rstart,
	bus_addr_t rend,
	bus_size_t size,
	bus_size_t align,
	bus_size_t boundary,
	int flags,
	bus_addr_t *addrp,
	bus_space_handle_t *bshp)
{
	bus_addr_t memaddr;
	int error;

	/*
	 * Do the requested allocation.
	 */
#ifdef EXTENT_DEBUG
	printf("mem: allocating from 0x%lx to 0x%lx\n", rstart, rend);
#endif
	error = extent_alloc_subregion(CHIP_MEM_EXTENT(v), rstart, rend,
	    size, align, boundary,
	    EX_FAST | EX_NOWAIT |
	    (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0), &memaddr);
	if (error) {
#ifdef EXTENT_DEBUG
		printf("mem: allocation failed (%d)\n", error);
		extent_print(CHIP_MEM_EXTENT(v));
#endif
		/* Propagate the failure instead of handing back a bogus handle. */
		return (error);
	}

#ifdef EXTENT_DEBUG
	printf("mem: allocated 0x%lx to 0x%lx\n", memaddr, memaddr + size - 1);
#endif

	*addrp = memaddr;
	*bshp = ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v)) + memaddr;

	return (0);
}
void
alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name,
    bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize,
    size_t ptesize, void *ptva, bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_wbase = wbase;
	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
		sgmap->aps_ptpa = 0;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have more strict alignment requirements.
		 */
		ptsize = (sgvasize / PAGE_SIZE) * ptesize;
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
			goto die;
		}
		sgmap->aps_ptpa = seg.ds_addr;
		sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
	}

	/*
	 * Create the extent map used to manage the virtual address
	 * space.
	 */
	sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
	    NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
	if (sgmap->aps_ex == NULL) {
		printf("unable to create extent map for sgmap `%s'\n", name);
		goto die;
	}

	/*
	 * Allocate a spill page if that hasn't already been done.
	 */
	if (alpha_sgmap_prefetch_spill_page_va == 0) {
		if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			printf("unable to allocate spill page for sgmap `%s'\n",
			    name);
			goto die;
		}
		alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
		alpha_sgmap_prefetch_spill_page_va =
		    ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
		memset((void *)alpha_sgmap_prefetch_spill_page_va, 0,
		    PAGE_SIZE);
	}

	return;
 die:
	panic("alpha_sgmap_init");
}
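/*
 * Usage sketch: a chipset driver would typically call alpha_sgmap_init()
 * once at attach time to set up its scatter-gather-mapped DMA window.
 * The softc members, window base, and sizes below are hypothetical and not
 * taken from any particular chipset; only the argument order matches the
 * function above.
 */
#if 0	/* illustrative only, not compiled */
	alpha_sgmap_init(sc->sc_dmat,		/* assumed bus_dma_tag_t */
	    &sc->sc_sgmap,			/* assumed struct alpha_sgmap */
	    "example_sgmap",			/* name */
	    0x800000,				/* wbase: window base on the bus */
	    0,					/* sgvabase */
	    0x800000,				/* sgvasize: 8 MB of SG space */
	    sizeof(uint64_t),			/* ptesize */
	    NULL,				/* ptva: no pre-existing page table */
	    0);					/* minptalign: default alignment */
#endif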