void pci_init_extents(void) { bios_memmap_t *bmp; u_int64_t size; if (pciio_ex == NULL) { /* * We only have 64K of addressable I/O space. * However, since BARs may contain garbage, we cover * the full 32-bit address space defined by PCI of * which we only make the first 64K available. */ pciio_ex = extent_create("pciio", 0, 0xffffffff, M_DEVBUF, NULL, 0, EX_NOWAIT | EX_FILLED); if (pciio_ex == NULL) return; extent_free(pciio_ex, 0, 0x10000, M_NOWAIT); } if (pcimem_ex == NULL) { pcimem_ex = extent_create("pcimem", 0, 0xffffffff, M_DEVBUF, NULL, 0, EX_NOWAIT); if (pcimem_ex == NULL) return; for (bmp = bios_memmap; bmp->type != BIOS_MAP_END; bmp++) { /* * Ignore address space beyond 4G. */ if (bmp->addr >= 0x100000000ULL) continue; size = bmp->size; if (bmp->addr + size >= 0x100000000ULL) size = 0x100000000ULL - bmp->addr; /* Ignore zero-sized regions. */ if (size == 0) continue; if (extent_alloc_region(pcimem_ex, bmp->addr, size, EX_NOWAIT)) printf("memory map conflict 0x%llx/0x%llx\n", bmp->addr, bmp->size); } /* Take out the video buffer area and BIOS areas. */ extent_alloc_region(pcimem_ex, IOM_BEGIN, IOM_SIZE, EX_CONFLICTOK | EX_NOWAIT); } }
/*
 * Map a region of chip memory space.  When accounting is requested,
 * the region is first reserved in the chip's memory extent; the handle
 * is the K0SEG direct-mapped address of the region.
 */
int
__C(CHIP,_mem_map)(void *v, bus_addr_t memaddr, bus_size_t memsize,
    int flags, bus_space_handle_t *memhp, int acct)
{
	int error;

	if (acct != 0) {
#ifdef EXTENT_DEBUG
		printf("mem: allocating 0x%lx to 0x%lx\n", memaddr,
		    memaddr + memsize - 1);
#endif
		error = extent_alloc_region(CHIP_MEM_EXTENT(v), memaddr,
		    memsize,
		    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
		if (error) {
#ifdef EXTENT_DEBUG
			printf("mem: allocation failed (%d)\n", error);
			extent_print(CHIP_MEM_EXTENT(v));
#endif
			return (error);
		}
	}

	/* Memory space is direct-mapped; no page tables involved. */
	*memhp = ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v)) + memaddr;

	return (0);
}
/*
 * Reserve the address range of a BAR whose decoder is currently
 * disabled.  BARs that are already enabled were handled by the normal
 * reservation pass and are skipped here.  On conflict the BAR is
 * cleared so the fixup stage will reassign it; returns 1 in that case,
 * 0 otherwise.
 */
int
pciaddr_do_resource_reserve_disabled(struct pcibios_softc *sc,
    pci_chipset_tag_t pc, pcitag_t tag, int mapreg, struct extent *ex,
    int type, u_long *addr, bus_size_t size)
{
	pcireg_t val;
	int error;

	/* Unassigned BAR: nothing to reserve. */
	if (*addr == 0)
		return (0);

	/* Skip BARs whose decoder is already enabled. */
	val = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (type == PCI_MAPREG_TYPE_MEM &&
	    (val & PCI_COMMAND_MEM_ENABLE) == PCI_COMMAND_MEM_ENABLE)
		return (0);
	if (type == PCI_MAPREG_TYPE_IO &&
	    (val & PCI_COMMAND_IO_ENABLE) == PCI_COMMAND_IO_ENABLE)
		return (0);

	/*
	 * BUG FIX: the message formerly used %x for a u_long address and
	 * a bus_size_t size — a printf format/argument mismatch.  Use
	 * %lx and cast size explicitly.
	 */
	PCIBIOS_PRINTV(("disabled %s space at addr 0x%lx size 0x%lx\n",
	    type == PCI_MAPREG_TYPE_MEM ? "mem" : "io", *addr,
	    (u_long)size));

	error = extent_alloc_region(ex, *addr, size, EX_NOWAIT | EX_MALLOCOK);
	if (error) {
		PCIBIOS_PRINTV(("Resource conflict.\n"));
		pci_conf_write(pc, tag, mapreg, 0); /* clear */
		return (1);
	}

	return (0);
}
/* default bus_space tag */

/*
 * Default map method: with no extent map, a handle is simply the bus
 * address offset by the space's base; otherwise the region is recorded
 * in the extent (whose start also biases the address).
 */
static int
_default_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	struct playstation2_bus_space *pbs = t;
	struct extent *ex = pbs->pbs_extent;
	int error;

	if (ex == NULL) {
		*bshp = (bus_space_handle_t)(bpa + pbs->pbs_base_addr);
		return (0);
	}

	/* Addresses tracked by the extent are relative to its start. */
	bpa += ex->ex_start;
	error = extent_alloc_region(ex, bpa, size, EX_NOWAIT | EX_MALLOCOK);

	if (error != 0) {
		DPRINTF("failed.\n");
		return (error);
	}

	*bshp = (bus_space_handle_t)bpa;
	DPRINTF("success.\n");

	return (0);
}
/*
 * Second-stage memory-space init for the Titan/Tsunami pchip: reserve
 * every enabled DMA window in the memory extent so bus_space
 * allocations cannot collide with them.
 */
void
tsp_bus_mem_init2(bus_space_tag_t t, void *v)
{
	struct tsp_config *pcp = v;
	struct ts_pchip *pccsr = pcp->pc_csr;
	int win, rc;

	for (win = 0; win < 4; win++) {
		alpha_mb();	/* order the window register reads */

		/* Window not in use. */
		if ((pccsr->tsp_wsba[win].tsg_r & WSBA_ENA) == 0)
			continue;

		rc = extent_alloc_region(CHIP_MEM_EXTENT(v),
		    WSBA_ADDR(pccsr->tsp_wsba[win].tsg_r),
		    WSM_LEN(pccsr->tsp_wsm[win].tsg_r),
		    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
		if (rc != 0) {
			printf("WARNING: unable to reserve DMA window "
			    "0x%lx - 0x%lx\n",
			    WSBA_ADDR(pccsr->tsp_wsba[win].tsg_r),
			    WSBA_ADDR(pccsr->tsp_wsba[win].tsg_r) +
			    (WSM_LEN(pccsr->tsp_wsm[win].tsg_r) - 1));
		}
	}
}
/*
 * Reserve the range of a disabled BAR (sh variant).  BARs with their
 * decoder already on were handled earlier; conflicting BARs are
 * cleared for reassignment.  Returns 1 on conflict, 0 otherwise.
 */
int
pciaddr_do_resource_reserve_disabled(struct shpcic_softc *sc,
    pci_chipset_tag_t pc, pcitag_t tag, int mapreg, struct extent *ex,
    int type, bus_addr_t *addr, bus_size_t size)
{
	pcireg_t csr;
	int rc;

	/* Skip I/O addresses that mask to zero against the port range. */
	if ((type == PCI_MAPREG_TYPE_IO) &&
	    ((*addr & PCIADDR_PORT_END) == 0))
		return (0);

	/* Unassigned BAR: nothing to do. */
	if (*addr == 0)
		return (0);

	/* Skip BARs whose decoder is already enabled. */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (type == PCI_MAPREG_TYPE_MEM &&
	    (csr & PCI_COMMAND_MEM_ENABLE) == PCI_COMMAND_MEM_ENABLE)
		return (0);
	if (type == PCI_MAPREG_TYPE_IO &&
	    (csr & PCI_COMMAND_IO_ENABLE) == PCI_COMMAND_IO_ENABLE)
		return (0);

	rc = extent_alloc_region(ex, *addr, size, EX_NOWAIT | EX_MALLOCOK);
	if (rc != 0) {
		PCIBIOS_PRINTV(("Resource conflict.\n"));
		pci_conf_write(pc, tag, mapreg, 0); /* clear */
		return (1);
	}

	return (0);
}
/*
 * Map Jensen internal I/O space.  Linear mappings are impossible for
 * I/O space; registers of the VL82C106 sit 512 bytes apart, hence the
 * 9-bit shift when forming the K0SEG handle.
 */
int
jensenio_intio_map(void *v, bus_addr_t ioaddr, bus_size_t iosize,
    int flags, bus_space_handle_t *iohp, int acct)
{
	struct jensenio_config *jcp = v;
	int rc;

	/*
	 * Can't map i/o space linearly.
	 */
	if (flags & BUS_SPACE_MAP_LINEAR)
		return (EOPNOTSUPP);

	if (acct != 0) {
#ifdef EXTENT_DEBUG
		printf("intio: allocating 0x%lx to 0x%lx\n", ioaddr,
		    ioaddr + iosize - 1);
#endif
		rc = extent_alloc_region(jcp->jc_io_ex, ioaddr, iosize,
		    EX_NOWAIT | (jcp->jc_mallocsafe ? EX_MALLOCOK : 0));
		if (rc != 0) {
#ifdef EXTENT_DEBUG
			printf("intio: allocation failed (%d)\n", rc);
			extent_print(jcp->jc_io_ex);
#endif
			return (rc);
		}
	}

	*iohp = ALPHA_PHYS_TO_K0SEG((ioaddr << 9) + JENSEN_VL82C106);

	return (0);
}
/*
 * Map a region of x86 I/O port or memory space.  The region is first
 * reserved in the matching extent map; I/O handles are the port number
 * itself, memory handles are kernel virtual addresses.
 */
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	int error;
	struct extent *ex;

	/*
	 * Pick the appropriate extent map.
	 */
	if (t == X86_BUS_SPACE_IO) {
		ex = ioport_ex;
		/* I/O port space cannot be mapped linearly. */
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (EINVAL);
	} else if (t == X86_BUS_SPACE_MEM)
		ex = iomem_ex;
	else
		panic("bus_space_map: bad bus space tag");

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = extent_alloc_region(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
	if (error)
		return (error);

	/*
	 * For I/O space, that's all she wrote.
	 */
	if (t == X86_BUS_SPACE_IO) {
		*bshp = bpa;
		return (0);
	}

	/*
	 * The ISA hole is permanently mapped; reuse that mapping.  The
	 * extent reservation made above is intentionally kept.
	 */
	if (bpa >= IOM_BEGIN && (bpa + size) <= IOM_END) {
		*bshp = (bus_space_handle_t)ISA_HOLE_VADDR(bpa);
		return(0);
	}

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	error = x86_mem_add_mapping(bpa, size, flags, bshp);
	if (error) {
		/* Mapping failed: roll back the extent reservation. */
		if (extent_free(ex, bpa, size, EX_NOWAIT |
		    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("bus_space_map: pa 0x%lx, size 0x%lx\n",
			    bpa, size);
			printf("bus_space_map: can't free region\n");
		}
	}

	return (error);
}
/*
 * Map a bus address in Alchemy high-memory space.  The physical range
 * is mapped page-by-page into freshly allocated kernel VA; if
 * accounting is requested, the bus range is then recorded in the
 * cookie's extent, and the mapping is torn down again on conflict.
 */
int
au_himem_map(void *cookie, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *bshp, int acct)
{
	au_himem_cookie_t *c = (au_himem_cookie_t *)cookie;
	int err;
	paddr_t pa;
	vaddr_t va;
	vsize_t realsz;
	int s;

	/* make sure we can map this bus address */
	if (addr < c->c_start || (addr + size) > c->c_end) {
		return EINVAL;
	}

	/* physical address, page aligned */
	pa = TRUNC_PAGE(c->c_physoff + addr);

	/*
	 * we are only going to work with whole pages.  the
	 * calculation is the offset into the first page, plus the
	 * intended size, rounded up to a whole number of pages.
	 */
	realsz = ROUND_PAGE((addr % PAGE_SIZE) + size);

	/* VA only; the physical pages are entered below by hand */
	va = uvm_km_alloc(kernel_map, realsz, PAGE_SIZE,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		return ENOMEM;
	}

	/* virtual address in handle (offset appropriately) */
	*bshp = va + (addr % PAGE_SIZE);

	/* map the pages in the kernel pmap */
	s = splhigh();
	while (realsz) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
		realsz -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	splx(s);

	/* record our allocated range of bus addresses */
	if (acct && c->c_extent != NULL) {
		err = extent_alloc_region(c->c_extent, addr, size, EX_NOWAIT);
		if (err) {
			/*
			 * NOTE(review): unmap is called with acct=0 so
			 * the reservation that just failed is not freed
			 * a second time — confirm au_himem_unmap honors
			 * that flag.
			 */
			au_himem_unmap(cookie, *bshp, size, 0);
			return err;
		}
	}

	return 0;
}
/*
 * Map a VME address range on demand.  The range must fit one of the
 * controller's configured ranges for the given address width; the
 * page-granular extent (note the atop() units) tracks what has been
 * handed out, and the reservation is rolled back if the actual VA
 * mapping fails.
 */
int
vme_map(struct vme_softc *sc, struct extent *ext, u_int awidth,
    bus_addr_t addr, bus_size_t size, int flags, vaddr_t *rva)
{
	const struct vme_range *r;
	int rc;
	paddr_t pa;
	psize_t offs, len;

	/*
	 * Since we need to map VME address ranges on demand, we will
	 * allocate with a page granularity.
	 */
	pa = trunc_page(addr);
	offs = addr - pa;
	len = round_page(addr + size) - pa;

	/*
	 * Check that the mapping fits within the available address ranges.
	 */
	for (r = sc->sc_ranges; r->vr_width != 0; r++) {
		if (r->vr_width == awidth && r->vr_start <= addr &&
		    r->vr_end >= addr + size - 1)
			break;
	}
	if (r->vr_width == 0)
		return EINVAL;

	/*
	 * Register this range in the per-width extent.
	 */
	if (ext != NULL) {
		rc = extent_alloc_region(ext, atop(pa), atop(len),
		    EX_NOWAIT | EX_MALLOCOK);
		if (rc != 0)
			return rc;
	}

	/*
	 * Allocate virtual memory for the range and map it.
	 */
	rc = vme_map_r(r, pa, len, flags, UVM_PROT_RW, rva);
	if (rc != 0) {
		/* Roll back the extent reservation on failure. */
		if (ext != NULL)
			(void)extent_free(ext, atop(pa), atop(len),
			    EX_NOWAIT | EX_MALLOCOK);
		return rc;
	}

	/* Point the returned VA at the original, unaligned address. */
	*rva += offs;

	return 0;
}
void sandpoint_bus_space_init(void) { int error; ioport_ex = extent_create("ioport", 0, 0x00bfffff, M_DEVBUF, (caddr_t)ioport_ex_storage, sizeof(ioport_ex_storage), EX_NOCOALESCE|EX_NOWAIT); error = extent_alloc_region(ioport_ex, 0x10000, 0x7F0000, EX_NOWAIT); if (error) panic("sandpoint_bus_space_init: can't block out reserved I/O space 0x10000-0x7fffff: error=%d\n", error); iomem_ex = extent_create("iomem", 0x80000000, 0xfdffffff, M_DEVBUF, (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage), EX_NOCOALESCE|EX_NOWAIT); }
/*
 * Map a region of an ARC bus space.  Regions outside the tag's window
 * are rejected; when the tag carries an extent map, the region is
 * recorded there before the handle is composed.
 */
int
arc_bus_space_map(bus_space_tag_t bst, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	int err;

	/* Reject regions outside this bus space window. */
	if (addr < bst->bs_start ||
	    addr + size > bst->bs_start + bst->bs_size)
		return EINVAL;

	if (bst->bs_extent != NULL) {
		err = extent_alloc_region(bst->bs_extent, addr, size,
		    EX_NOWAIT | malloc_safe);
		if (err != 0)
			return err;
	}

	return bus_space_compose_handle(bst, addr, size, flags, bshp);
}
/*
 * Map a region of bus space.  The address is first run through the
 * chip's translate hook; if accounting is requested and this chip has
 * an extent map, the region is reserved there.  The handle is the
 * KSEG0 (cacheable) or KSEG1 (uncached) direct-mapped address.
 */
int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	/* No accounting requested: skip the extent bookkeeping. */
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("xxx: allocating 0x%lx to 0x%lx\n", addr, addr + size - 1);
#endif
	error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("xxx: allocation failed (%d)\n", error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */

	/* Cacheable mappings go through KSEG0, uncached through KSEG1. */
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		*hp = MIPS_PHYS_TO_KSEG0(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	else
		*hp = MIPS_PHYS_TO_KSEG1(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));

	return (0);
}
/*
 * Map a region of hp700 main-bus space: reserve it in the global
 * physical extent, then establish the mapping, undoing the reservation
 * if the mapping fails.
 */
int
mbus_map(void *v, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int error;

	/* Keep only the physical part of the address. */
	bpa &= HPPA_PHYSMAP;

	error = extent_alloc_region(hppa_ex, bpa, size, EX_NOWAIT);
	if (error)
		return (error);

	error = mbus_add_mapping(bpa, size, flags, bshp);
	if (error) {
		/* Roll back the reservation; complain if that fails too. */
		if (extent_free(hppa_ex, bpa, size, EX_NOWAIT)) {
			printf("bus_space_map: pa 0x%lx, size 0x%lx\n",
			    bpa, size);
			printf("bus_space_map: can't free region\n");
		}
	}

	return error;
}
/*
 * Reserve the address range already programmed into a BAR.  On
 * conflict the BAR is cleared so the allocation stage will assign a
 * fresh address.  Returns 1 when the BAR needs reallocation,
 * 0 on success.
 */
int
pciaddr_do_resource_reserve(pci_chipset_tag_t pc, pcitag_t tag,
    int mapreg, void *ctx, int type, bus_addr_t *addr, bus_size_t size)
{
	struct pciaddr *pamap = (struct pciaddr *)ctx;
	struct extent *ex;
	int error;

	/* Unassigned BAR: let the allocator deal with it. */
	if (*addr == 0)
		return 1;

	if (type == PCI_MAPREG_TYPE_MEM)
		ex = pamap->extent_mem;
	else
		ex = pamap->extent_port;

	error = extent_alloc_region(ex, *addr, size, EX_NOWAIT| EX_MALLOCOK);
	if (error != 0) {
		aprint_debug("Resource conflict.\n");
		pci_conf_write(pc, tag, mapreg, 0); /* clear */
		return 1;
	}

	return 0;
}
/*
 * Early machine-dependent initialization for hp700: bring up the PDC
 * call interface, size the caches and TLB, reserve physical memory in
 * the extent map, carve kernel data structures out of the space past
 * the kernel, bootstrap pmap, and enable the optional hardware TLB
 * walker and any coprocessors.
 */
void
hppa_init()
{
	extern int kernel_text, end;
	struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	vm_offset_t v, vstart, vend;
	register int pdcerr;
	int usehpt;

	/* init PDC iface, so we can call em easy */
	pdc_init();

	/* calculate cpu speed */
	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
	delay_init();

	/*
	 * get cache parameters from the PDC
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
	    &pdc_cache)) < 0) {
#ifdef DIAGNOSTIC
		printf("Warning: PDC_CACHE call Ret'd %d\n", pdcerr);
#endif
	}

	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_size = pdc_cache.dc_size;
	dcache_stride = pdc_cache.dc_stride;
	icache_stride = pdc_cache.ic_stride;

	/*
	 * purge TLBs and flush caches
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL) < 0)
		printf("WARNING: BTLB purge failed\n");
	ptlball();
	fcacheall();

	/* calculate HPT size */
	hpt_hashsize = PAGE0->imm_max_mem / NBPG;
	mtctl(hpt_hashsize - 1, CR_HPTMASK);

	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
		printf("WARNING: no HW tlb walker\n");
		usehpt = 0;
	} else {
		usehpt = 1;
#ifdef DEBUG
		printf("hwtlb: %u-%u, %u/", pdc_hwtlb.min_size,
		    pdc_hwtlb.max_size, hpt_hashsize);
#endif
		/* clamp the hash size into the walker's supported window */
		if (hpt_hashsize > pdc_hwtlb.max_size)
			hpt_hashsize = pdc_hwtlb.max_size;
		else if (hpt_hashsize < pdc_hwtlb.min_size)
			hpt_hashsize = pdc_hwtlb.min_size;
#ifdef DEBUG
		printf("%u (0x%x)\n", hpt_hashsize,
		    hpt_hashsize * sizeof(struct hpt_entry));
#endif
	}

	totalphysmem = PAGE0->imm_max_mem / NBPG;
	resvmem = ((vm_offset_t)&kernel_text) / NBPG;
	vstart = hppa_round_page(&end);
	vend = VM_MAX_KERNEL_ADDRESS;

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)mem_ex_storage, sizeof(mem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vm_offset_t)PAGE0->imm_max_mem,
	    EX_NOWAIT))
		panic("cannot reserve main memory");

	/*
	 * Allocate space for system data structures.  We are given a
	 * starting virtual address and each valloc() below sets one
	 * data structure pointer and advances the cursor.
	 * (NOTE(review): an older comment here mentioned allocsys();
	 * the code instead uses the valloc macro directly.)
	 */
	v = vstart;
#define	valloc(name, type, num)	\
	(name) = (type *)v; v = (vm_offset_t)((name)+(num))

#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif

	valloc(callout, struct callout, ncallout);

	nswapmap = maxproc * 2;
	valloc(swapmap, struct map, nswapmap);

#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 10
#endif /* BUFCACHEPERCENT */
	if (bufpages == 0)
		bufpages = totalphysmem / BUFCACHEPERCENT / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}

	/* Restrict to at most 70% filled kvm */
	if (nbuf * MAXBSIZE >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    MAXBSIZE * 7 / 10;

	/* More buffer pages than fits into the buffers is senseless. */
	if (bufpages > nbuf * MAXBSIZE / CLBYTES)
		bufpages = nbuf * MAXBSIZE / CLBYTES;

	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) & ~1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
#undef valloc

	/* zero out everything just allocated */
	bzero ((void *)vstart, (v - vstart));
	vstart = v;

	pmap_bootstrap(&vstart, &vend);
	physmem = totalphysmem - btoc(vstart);

	/* alloc msgbuf */
	if (!(msgbufp = (void *)pmap_steal_memory(sizeof(struct msgbuf),
	    NULL, NULL)))
		panic("cannot allocate msgbuf");
	msgbufmapped = 1;

#ifdef DEBUG
	printf("mem: %x+%x, %x\n", physmem, resvmem, totalphysmem);
#endif

	/* Turn on the HW TLB assist */
	if (usehpt) {
		if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_TLB,
		    PDC_TLB_CONFIG, &pdc_hwtlb, hpt_table,
		    sizeof(struct hpt_entry) * hpt_hashsize,
		    PDC_TLB_WORD3)) < 0) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			    pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB(%d entries at 0x%x) initialized (%d)\n",
			    hpt_hashsize, hpt_table, pdcerr);
	}

	/*
	 * Locate any coprocessors and enable them by setting up the CCR.
	 * SFU's are ignored (since we dont have any).  Also, initialize
	 * the floating point registers here.
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
	    &pdc_coproc)) < 0)
		printf("WARNING: PDC_COPROC call Ret'd %d\n", pdcerr);
	else {
#ifdef DEBUG
		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
		    pdc_coproc.ccr_present);
#endif
	}
	copr_sfu_config = pdc_coproc.ccr_enable;
	mtctl(copr_sfu_config & CCR_MASK, CR_CCR);
/*
	fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
	mtctl(CR_CCR, 0);
*/

	/*
	 * Clear the FAULT light (so we know when we get a real one)
	 * PDC_COPROC apparently turns it on (for whatever reason).
	 */
	pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
	(void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef DDB
	ddb_init();
#endif
#ifdef DEBUG
	printf("hppa_init: leaving\n");
#endif
	kernelmapped++;
}
/*
 * Initialize a chip bus space tag: fill in every method pointer and,
 * when this chip has an extent map, create it fully allocated and free
 * back only the bus windows that actually decode.
 *
 * Fixes in this revision (both inside EXTENT_DEBUG only):
 *  - the window-1 debug printf used %x for bus_addr_t values; it now
 *    uses %lx with u_long casts like the other windows;
 *  - the window-3 overlap branch reported "window 2" and printed the
 *    window-2 bounds (copy-paste error); it now reports window 3.
 */
void
__BS(init)(bus_space_tag_t t, void *v)
{
#ifdef CHIP_EXTENT
	struct extent *ex;
#endif

	/*
	 * Initialize the bus space tag.
	 */

	/* cookie */
	t->bs_cookie = v;

	/* mapping/unmapping */
	t->bs_map = __BS(map);
	t->bs_unmap = __BS(unmap);
	t->bs_subregion = __BS(subregion);

	t->bs_translate = __BS(translate);
	t->bs_get_window = __BS(get_window);

	/* allocation/deallocation */
	t->bs_alloc = __BS(alloc);
	t->bs_free = __BS(free);

	/* get kernel virtual address */
	t->bs_vaddr = __BS(vaddr);

	/* mmap for user */
	t->bs_mmap = __BS(mmap);

	/* barrier */
	t->bs_barrier = __BS(barrier);

	/* read (single) */
	t->bs_r_1 = __BS(read_1);
	t->bs_r_2 = __BS(read_2);
	t->bs_r_4 = __BS(read_4);
	t->bs_r_8 = __BS(read_8);

	/* read multiple */
	t->bs_rm_1 = __BS(read_multi_1);
	t->bs_rm_2 = __BS(read_multi_2);
	t->bs_rm_4 = __BS(read_multi_4);
	t->bs_rm_8 = __BS(read_multi_8);

	/* read region */
	t->bs_rr_1 = __BS(read_region_1);
	t->bs_rr_2 = __BS(read_region_2);
	t->bs_rr_4 = __BS(read_region_4);
	t->bs_rr_8 = __BS(read_region_8);

	/* write (single) */
	t->bs_w_1 = __BS(write_1);
	t->bs_w_2 = __BS(write_2);
	t->bs_w_4 = __BS(write_4);
	t->bs_w_8 = __BS(write_8);

	/* write multiple */
	t->bs_wm_1 = __BS(write_multi_1);
	t->bs_wm_2 = __BS(write_multi_2);
	t->bs_wm_4 = __BS(write_multi_4);
	t->bs_wm_8 = __BS(write_multi_8);

	/* write region */
	t->bs_wr_1 = __BS(write_region_1);
	t->bs_wr_2 = __BS(write_region_2);
	t->bs_wr_4 = __BS(write_region_4);
	t->bs_wr_8 = __BS(write_region_8);

	/* set multiple */
	t->bs_sm_1 = __BS(set_multi_1);
	t->bs_sm_2 = __BS(set_multi_2);
	t->bs_sm_4 = __BS(set_multi_4);
	t->bs_sm_8 = __BS(set_multi_8);

	/* set region */
	t->bs_sr_1 = __BS(set_region_1);
	t->bs_sr_2 = __BS(set_region_2);
	t->bs_sr_4 = __BS(set_region_4);
	t->bs_sr_8 = __BS(set_region_8);

	/* copy */
	t->bs_c_1 = __BS(copy_region_1);
	t->bs_c_2 = __BS(copy_region_2);
	t->bs_c_4 = __BS(copy_region_4);
	t->bs_c_8 = __BS(copy_region_8);

#ifdef CHIP_NEED_STREAM
	/* read (single), stream */
	t->bs_rs_1 = __BS(read_stream_1);
	t->bs_rs_2 = __BS(read_stream_2);
	t->bs_rs_4 = __BS(read_stream_4);
	t->bs_rs_8 = __BS(read_stream_8);

	/* read multiple, stream */
	t->bs_rms_1 = __BS(read_multi_stream_1);
	t->bs_rms_2 = __BS(read_multi_stream_2);
	t->bs_rms_4 = __BS(read_multi_stream_4);
	t->bs_rms_8 = __BS(read_multi_stream_8);

	/* read region, stream */
	t->bs_rrs_1 = __BS(read_region_stream_1);
	t->bs_rrs_2 = __BS(read_region_stream_2);
	t->bs_rrs_4 = __BS(read_region_stream_4);
	t->bs_rrs_8 = __BS(read_region_stream_8);

	/* write (single), stream */
	t->bs_ws_1 = __BS(write_stream_1);
	t->bs_ws_2 = __BS(write_stream_2);
	t->bs_ws_4 = __BS(write_stream_4);
	t->bs_ws_8 = __BS(write_stream_8);

	/* write multiple, stream */
	t->bs_wms_1 = __BS(write_multi_stream_1);
	t->bs_wms_2 = __BS(write_multi_stream_2);
	t->bs_wms_4 = __BS(write_multi_stream_4);
	t->bs_wms_8 = __BS(write_multi_stream_8);

	/* write region, stream */
	t->bs_wrs_1 = __BS(write_region_stream_1);
	t->bs_wrs_2 = __BS(write_region_stream_2);
	t->bs_wrs_4 = __BS(write_region_stream_4);
	t->bs_wrs_8 = __BS(write_region_stream_8);
#else	/* CHIP_NEED_STREAM */
	/* read (single), stream */
	t->bs_rs_1 = __BS(read_1);
	t->bs_rs_2 = __BS(read_2);
	t->bs_rs_4 = __BS(read_4);
	t->bs_rs_8 = __BS(read_8);

	/* read multiple, stream */
	t->bs_rms_1 = __BS(read_multi_1);
	t->bs_rms_2 = __BS(read_multi_2);
	t->bs_rms_4 = __BS(read_multi_4);
	t->bs_rms_8 = __BS(read_multi_8);

	/* read region, stream */
	t->bs_rrs_1 = __BS(read_region_1);
	t->bs_rrs_2 = __BS(read_region_2);
	t->bs_rrs_4 = __BS(read_region_4);
	t->bs_rrs_8 = __BS(read_region_8);

	/* write (single), stream */
	t->bs_ws_1 = __BS(write_1);
	t->bs_ws_2 = __BS(write_2);
	t->bs_ws_4 = __BS(write_4);
	t->bs_ws_8 = __BS(write_8);

	/* write multiple, stream */
	t->bs_wms_1 = __BS(write_multi_1);
	t->bs_wms_2 = __BS(write_multi_2);
	t->bs_wms_4 = __BS(write_multi_4);
	t->bs_wms_8 = __BS(write_multi_8);

	/* write region, stream */
	t->bs_wrs_1 = __BS(write_region_1);
	t->bs_wrs_2 = __BS(write_region_2);
	t->bs_wrs_4 = __BS(write_region_4);
	t->bs_wrs_8 = __BS(write_region_8);
#endif	/* CHIP_NEED_STREAM */

#ifdef CHIP_EXTENT
	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
	ex = extent_create(__S(__BS(bus)), 0x0UL, 0xffffffffUL, M_DEVBUF,
	    (void *)CHIP_EX_STORE(v), CHIP_EX_STORE_SIZE(v), EX_NOWAIT);
	/* Start fully allocated; windows below free back what decodes. */
	extent_alloc_region(ex, 0, 0xffffffffUL, EX_NOWAIT);

#ifdef CHIP_W1_BUS_START
	/*
	 * The window may be disabled.  We notice this by seeing
	 * -1 as the bus base address.
	 */
	if (CHIP_W1_BUS_START(v) == (bus_addr_t) -1) {
#ifdef EXTENT_DEBUG
		printf("xxx: this space is disabled\n");
#endif
		return;
	}

#ifdef EXTENT_DEBUG
	printf("xxx: freeing from 0x%lx to 0x%lx\n",
	    (u_long)CHIP_W1_BUS_START(v), (u_long)CHIP_W1_BUS_END(v));
#endif
	extent_free(ex, CHIP_W1_BUS_START(v),
	    CHIP_W1_BUS_END(v) - CHIP_W1_BUS_START(v) + 1, EX_NOWAIT);
#endif

#ifdef CHIP_W2_BUS_START
	if (CHIP_W2_BUS_START(v) != CHIP_W1_BUS_START(v)) {
#ifdef EXTENT_DEBUG
		printf("xxx: freeing from 0x%lx to 0x%lx\n",
		    (u_long)CHIP_W2_BUS_START(v),
		    (u_long)CHIP_W2_BUS_END(v));
#endif
		extent_free(ex, CHIP_W2_BUS_START(v),
		    CHIP_W2_BUS_END(v) - CHIP_W2_BUS_START(v) + 1,
		    EX_NOWAIT);
	} else {
#ifdef EXTENT_DEBUG
		printf("xxx: window 2 (0x%lx to 0x%lx) overlaps window 1\n",
		    (u_long)CHIP_W2_BUS_START(v),
		    (u_long)CHIP_W2_BUS_END(v));
#endif
	}
#endif

#ifdef CHIP_W3_BUS_START
	if (CHIP_W3_BUS_START(v) != CHIP_W1_BUS_START(v) &&
	    CHIP_W3_BUS_START(v) != CHIP_W2_BUS_START(v)) {
#ifdef EXTENT_DEBUG
		printf("xxx: freeing from 0x%lx to 0x%lx\n",
		    (u_long)CHIP_W3_BUS_START(v),
		    (u_long)CHIP_W3_BUS_END(v));
#endif
		extent_free(ex, CHIP_W3_BUS_START(v),
		    CHIP_W3_BUS_END(v) - CHIP_W3_BUS_START(v) + 1,
		    EX_NOWAIT);
	} else {
#ifdef EXTENT_DEBUG
		/* was "window 2" with window-2 bounds: copy-paste bug */
		printf("xxx: window 3 (0x%lx to 0x%lx) overlaps window 1 or 2\n",
		    (u_long)CHIP_W3_BUS_START(v),
		    (u_long)CHIP_W3_BUS_END(v));
#endif
	}
#endif

#ifdef EXTENT_DEBUG
	extent_print(ex);
#endif
	CHIP_EXTENT(v) = ex;
#endif /* CHIP_EXTENT */
}
/*
 * Initialize a chip memory bus space tag: fill in every method pointer
 * and create the dense- and sparse-memory extent maps, freeing back
 * only the bus windows that actually decode.
 *
 * Fix in this revision (EXTENT_DEBUG only): the sparse-memory window-3
 * overlap branch reported "window 2" and printed the window-2 bounds
 * (copy-paste error); it now reports window 3.
 */
void
__C(CHIP,_bus_mem_init)(bus_space_tag_t t, void *v)
{
#ifdef CHIP_D_MEM_W1_SYS_START
	struct extent *dex;
#endif
	struct extent *sex;

	/*
	 * Initialize the bus space tag.
	 */

	/* cookie */
	t->abs_cookie = v;

	/* mapping/unmapping */
	t->abs_map = __C(CHIP,_mem_map);
	t->abs_unmap = __C(CHIP,_mem_unmap);
	t->abs_subregion = __C(CHIP,_mem_subregion);

	t->abs_translate = __C(CHIP,_mem_translate);
	t->abs_get_window = __C(CHIP,_mem_get_window);

	/* allocation/deallocation */
	t->abs_alloc = __C(CHIP,_mem_alloc);
	t->abs_free = __C(CHIP,_mem_free);

	/* get kernel virtual address */
	t->abs_vaddr = __C(CHIP,_mem_vaddr);

	/* mmap for user */
	t->abs_mmap = __C(CHIP,_mem_mmap);

	/* barrier */
	t->abs_barrier = __C(CHIP,_mem_barrier);

	/* read (single) */
	t->abs_r_1 = __C(CHIP,_mem_read_1);
	t->abs_r_2 = __C(CHIP,_mem_read_2);
	t->abs_r_4 = __C(CHIP,_mem_read_4);
	t->abs_r_8 = __C(CHIP,_mem_read_8);

	/* read multiple */
	t->abs_rm_1 = __C(CHIP,_mem_read_multi_1);
	t->abs_rm_2 = __C(CHIP,_mem_read_multi_2);
	t->abs_rm_4 = __C(CHIP,_mem_read_multi_4);
	t->abs_rm_8 = __C(CHIP,_mem_read_multi_8);

	/* read region */
	t->abs_rr_1 = __C(CHIP,_mem_read_region_1);
	t->abs_rr_2 = __C(CHIP,_mem_read_region_2);
	t->abs_rr_4 = __C(CHIP,_mem_read_region_4);
	t->abs_rr_8 = __C(CHIP,_mem_read_region_8);

	/* write (single) */
	t->abs_w_1 = __C(CHIP,_mem_write_1);
	t->abs_w_2 = __C(CHIP,_mem_write_2);
	t->abs_w_4 = __C(CHIP,_mem_write_4);
	t->abs_w_8 = __C(CHIP,_mem_write_8);

	/* write multiple */
	t->abs_wm_1 = __C(CHIP,_mem_write_multi_1);
	t->abs_wm_2 = __C(CHIP,_mem_write_multi_2);
	t->abs_wm_4 = __C(CHIP,_mem_write_multi_4);
	t->abs_wm_8 = __C(CHIP,_mem_write_multi_8);

	/* write region */
	t->abs_wr_1 = __C(CHIP,_mem_write_region_1);
	t->abs_wr_2 = __C(CHIP,_mem_write_region_2);
	t->abs_wr_4 = __C(CHIP,_mem_write_region_4);
	t->abs_wr_8 = __C(CHIP,_mem_write_region_8);

	/* set multiple */
	t->abs_sm_1 = __C(CHIP,_mem_set_multi_1);
	t->abs_sm_2 = __C(CHIP,_mem_set_multi_2);
	t->abs_sm_4 = __C(CHIP,_mem_set_multi_4);
	t->abs_sm_8 = __C(CHIP,_mem_set_multi_8);

	/* set region */
	t->abs_sr_1 = __C(CHIP,_mem_set_region_1);
	t->abs_sr_2 = __C(CHIP,_mem_set_region_2);
	t->abs_sr_4 = __C(CHIP,_mem_set_region_4);
	t->abs_sr_8 = __C(CHIP,_mem_set_region_8);

	/* copy */
	t->abs_c_1 = __C(CHIP,_mem_copy_region_1);
	t->abs_c_2 = __C(CHIP,_mem_copy_region_2);
	t->abs_c_4 = __C(CHIP,_mem_copy_region_4);
	t->abs_c_8 = __C(CHIP,_mem_copy_region_8);

#ifdef CHIP_D_MEM_W1_SYS_START
	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
	dex = extent_create(__S(__C(CHIP,_bus_dmem)), 0x0UL,
	    0xffffffffffffffffUL, (void *)CHIP_D_MEM_EX_STORE(v),
	    CHIP_D_MEM_EX_STORE_SIZE(v), EX_NOWAIT);
	/* Start fully allocated; windows below free back what decodes. */
	extent_alloc_region(dex, 0, 0xffffffffffffffffUL, EX_NOWAIT);

#ifdef CHIP_D_MEM_W1_BUS_START
#ifdef EXTENT_DEBUG
	printf("dmem: freeing from 0x%lx to 0x%lx\n",
	    CHIP_D_MEM_W1_BUS_START(v), CHIP_D_MEM_W1_BUS_END(v));
#endif
	extent_free(dex, CHIP_D_MEM_W1_BUS_START(v),
	    CHIP_D_MEM_W1_BUS_END(v) - CHIP_D_MEM_W1_BUS_START(v) + 1,
	    EX_NOWAIT);
#endif

#ifdef EXTENT_DEBUG
	extent_print(dex);
#endif
	CHIP_D_MEM_EXTENT(v) = dex;
#endif /* CHIP_D_MEM_W1_SYS_START */

	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
	sex = extent_create(__S(__C(CHIP,_bus_smem)), 0x0UL,
	    0xffffffffffffffffUL, (void *)CHIP_S_MEM_EX_STORE(v),
	    CHIP_S_MEM_EX_STORE_SIZE(v), EX_NOWAIT);
	/* Start fully allocated; windows below free back what decodes. */
	extent_alloc_region(sex, 0, 0xffffffffffffffffUL, EX_NOWAIT);

#ifdef CHIP_S_MEM_W1_BUS_START
#ifdef EXTENT_DEBUG
	printf("smem: freeing from 0x%lx to 0x%lx\n",
	    CHIP_S_MEM_W1_BUS_START(v), CHIP_S_MEM_W1_BUS_END(v));
#endif
	extent_free(sex, CHIP_S_MEM_W1_BUS_START(v),
	    CHIP_S_MEM_W1_BUS_END(v) - CHIP_S_MEM_W1_BUS_START(v) + 1,
	    EX_NOWAIT);
#endif

#ifdef CHIP_S_MEM_W2_BUS_START
	if (CHIP_S_MEM_W2_BUS_START(v) != CHIP_S_MEM_W1_BUS_START(v)) {
#ifdef EXTENT_DEBUG
		printf("smem: freeing from 0x%lx to 0x%lx\n",
		    CHIP_S_MEM_W2_BUS_START(v), CHIP_S_MEM_W2_BUS_END(v));
#endif
		extent_free(sex, CHIP_S_MEM_W2_BUS_START(v),
		    CHIP_S_MEM_W2_BUS_END(v) -
		    CHIP_S_MEM_W2_BUS_START(v) + 1, EX_NOWAIT);
	} else {
#ifdef EXTENT_DEBUG
		printf("smem: window 2 (0x%lx to 0x%lx) overlaps window 1\n",
		    CHIP_S_MEM_W2_BUS_START(v), CHIP_S_MEM_W2_BUS_END(v));
#endif
	}
#endif

#ifdef CHIP_S_MEM_W3_BUS_START
	if (CHIP_S_MEM_W3_BUS_START(v) != CHIP_S_MEM_W1_BUS_START(v) &&
	    CHIP_S_MEM_W3_BUS_START(v) != CHIP_S_MEM_W2_BUS_START(v)) {
#ifdef EXTENT_DEBUG
		printf("smem: freeing from 0x%lx to 0x%lx\n",
		    CHIP_S_MEM_W3_BUS_START(v), CHIP_S_MEM_W3_BUS_END(v));
#endif
		extent_free(sex, CHIP_S_MEM_W3_BUS_START(v),
		    CHIP_S_MEM_W3_BUS_END(v) -
		    CHIP_S_MEM_W3_BUS_START(v) + 1, EX_NOWAIT);
	} else {
#ifdef EXTENT_DEBUG
		/* was "window 2" with window-2 bounds: copy-paste bug */
		printf("smem: window 3 (0x%lx to 0x%lx) overlaps window 1 or 2\n",
		    CHIP_S_MEM_W3_BUS_START(v), CHIP_S_MEM_W3_BUS_END(v));
#endif
	}
#endif

#ifdef EXTENT_DEBUG
	extent_print(sex);
#endif
	CHIP_S_MEM_EXTENT(v) = sex;
}
/*
 * Fix up PCI BAR assignments left incomplete or conflicting by the
 * BIOS: create memory/port extents, reserve everything the BIOS
 * already assigned plus well-known system areas, pick a start point
 * for new allocations, then reallocate the bogus devices.
 */
void
pci_addr_fixup(struct pcibios_softc *sc, pci_chipset_tag_t pc, int maxbus)
{
	extern paddr_t avail_end;
	const char *verbose_header =
	    "[%s]-----------------------\n"
	    " device vendor product\n"
	    " register space address size\n"
	    "--------------------------------------------\n";
	const char *verbose_footer =
	    "--------------------------[%3d devices bogus]\n";
	/* Fixed, non-PCI address ranges that must never be allocated. */
	const struct {
		bus_addr_t start;
		bus_size_t size;
		char *name;
	} system_reserve [] = {
		{ 0xfec00000, 0x100000, "I/O APIC" },
		{ 0xfee00000, 0x100000, "Local APIC" },
		{ 0xfffe0000, 0x20000, "BIOS PROM" },
		{ 0, 0, 0 }, /* terminator */
	}, *srp;
	paddr_t start;
	int error;

	sc->extent_mem = extent_create("PCI I/O memory space",
	    PCIADDR_MEM_START, PCIADDR_MEM_END, M_DEVBUF, 0, 0, EX_NOWAIT);
	KASSERT(sc->extent_mem);
	sc->extent_port = extent_create("PCI I/O port space",
	    PCIADDR_PORT_START, PCIADDR_PORT_END, M_DEVBUF, 0, 0, EX_NOWAIT);
	KASSERT(sc->extent_port);

	/*
	 * 1. check & reserve system BIOS setting.
	 */
	PCIBIOS_PRINTV((verbose_header, "System BIOS Setting"));
	pci_device_foreach(sc, pc, maxbus, pciaddr_resource_reserve);
	pci_device_foreach(sc, pc, maxbus, pciaddr_resource_reserve_disabled);
	PCIBIOS_PRINTV((verbose_footer, sc->nbogus));

	/*
	 * 2. reserve non-PCI area.
	 */
	for (srp = system_reserve; srp->size; srp++) {
		error = extent_alloc_region(sc->extent_mem, srp->start,
		    srp->size, EX_NOWAIT| EX_MALLOCOK);
		if (error != 0)
			printf("WARNING: can't reserve area for %s.\n",
			    srp->name);
	}

	/*
	 * 3. determine allocation space
	 */
	start = round_page(avail_end + 1);
	if (start < PCIADDR_ISAMEM_RESERVE)
		start = PCIADDR_ISAMEM_RESERVE;
	/*
	 * NOTE(review): "+ 0x100000 + 1" looks like it was intended to
	 * round up to the next 1MB boundary ("+ 0x100000 - 1"); as
	 * written, an already-aligned start advances one extra MB —
	 * confirm against upstream before changing.
	 */
	sc->mem_alloc_start = (start + 0x100000 + 1) & ~(0x100000 - 1);
	sc->port_alloc_start = PCIADDR_ISAPORT_RESERVE;
	PCIBIOS_PRINTV((" Physical memory end: 0x%08x\n PCI memory mapped I/O "
	    "space start: 0x%08x\n", avail_end, sc->mem_alloc_start));

	/*
	 * 4. do fixup
	 */
	PCIBIOS_PRINTV((verbose_header, "PCIBIOS fixup stage"));
	sc->nbogus = 0;
	pci_device_foreach(sc, pc, maxbus, pciaddr_resource_allocate);
	PCIBIOS_PRINTV((verbose_footer, sc->nbogus));
}
/*
 * Second-stage memory-space init for the AMD 751 (Irongate).  Since
 * the chip has no DMA windows, RAM itself must be reserved in the
 * memory extent so device mappings cannot land on it.  The ISA hole
 * (IOM_BEGIN..IOM_END) is skipped: devices, not RAM, decode there even
 * when firmware reports a cluster covering it.
 *
 * Fix in this revision: the third warning message read "unable
 * reserve"; it now reads "unable to reserve", matching the other two.
 */
void
irongate_bus_mem_init2(bus_space_tag_t t, void *v)
{
	u_long size, start, end;
	int i, error;

	/*
	 * Since the AMD 751 doesn't have DMA windows, we need to
	 * allocate RAM out of the extent map.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		start = mem_clusters[i].start;
		size = mem_clusters[i].size & ~PAGE_MASK;
		end = mem_clusters[i].start + size;

		if (start <= IOM_BEGIN && end >= IOM_END) {
			/*
			 * The ISA hole lies somewhere in this
			 * memory cluster.  The UP1000 firmware
			 * doesn't report this to us properly,
			 * so we have to cope, since devices are
			 * mapped into the ISA hole, but RAM is
			 * not.
			 *
			 * Sigh, the UP1000 is a really cool machine,
			 * but it is sometimes too PC-like for my
			 * taste.
			 */
			if (start < IOM_BEGIN) {
				/* RAM below the hole. */
				error = extent_alloc_region(
				    CHIP_MEM_EXTENT(v), start,
				    (IOM_BEGIN - start), EX_NOWAIT |
				    (CHIP_EX_MALLOC_SAFE(v) ?
				     EX_MALLOCOK : 0));
				if (error) {
					printf("WARNING: unable to reserve "
					    "chunk from mem cluster %d "
					    "(0x%lx - 0x%lx)\n", i,
					    start, (u_long) IOM_BEGIN - 1);
				}
			}
			if (end > IOM_END) {
				/* RAM above the hole. */
				error = extent_alloc_region(
				    CHIP_MEM_EXTENT(v), IOM_END,
				    (end - IOM_END), EX_NOWAIT |
				    (CHIP_EX_MALLOC_SAFE(v) ?
				     EX_MALLOCOK : 0));
				if (error) {
					printf("WARNING: unable to reserve "
					    "chunk from mem cluster %d "
					    "(0x%lx - 0x%lx)\n", i,
					    (u_long) IOM_END, end - 1);
				}
			}
		} else {
			error = extent_alloc_region(CHIP_MEM_EXTENT(v),
			    start, size, EX_NOWAIT |
			    (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
			if (error) {
				printf("WARNING: unable to reserve mem "
				    "cluster %d (0x%lx - 0x%lx)\n", i,
				    start, start + (size - 1));
			}
		}
	}
}
/*
 * init_x86_64:
 *
 *	Machine-dependent amd64 bootstrap.  In order: MSR/pcb setup,
 *	console, boot-argument parsing, pmap bootstrap, physical memory
 *	discovery (falling back to BIOS base/extended memory counts when
 *	no memory map was supplied), loading RAM clusters into UVM with
 *	the first 16MB on a separate freelist for ISA DMA, stealing the
 *	message buffer from the end of core, and finally GDT/LDT/IDT
 *	construction and interrupt enablement.  Statement order is
 *	load-bearing throughout.
 */
void
init_x86_64(paddr_t first_avail)
{
	extern void consinit(void);
	extern struct extent *iomem_ex;
	struct region_descriptor region;
	struct mem_segment_descriptor *ldt_segp;
	int x, first16q, ist;
	u_int64_t seg_start, seg_end;
	u_int64_t seg_start1, seg_end1;

	cpu_init_msrs(&cpu_info_primary);

	proc0.p_addr = proc0paddr;
	cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;

	x86_bus_space_init();

	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Initailize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

#if 0
	uvmexp.ncolors = 2;
#endif

	/*
	 * Boot arguments are in a single page specified by /boot.
	 *
	 * We require the "new" vector form, as well as memory ranges
	 * to be given in bytes rather than KB.
	 *
	 * locore copies the data into bootinfo[] for us.
	 */
	if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
	    (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
		if (bootinfo_size >= sizeof(bootinfo))
			panic("boot args too big");

		getbootinfo(bootinfo, bootinfo_size);
	} else
		panic("invalid /boot");

	avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
				 /* and VM system doesn't work with phys 0 */
#ifdef MULTIPROCESSOR
	/* Keep the MP trampoline page out of the free memory. */
	if (avail_start < MP_TRAMPOLINE + PAGE_SIZE)
		avail_start = MP_TRAMPOLINE + PAGE_SIZE;
#endif

	/*
	 * Call pmap initialization to make new kernel address space.
	 * We must do this before loading pages into the VM system.
	 */
	pmap_bootstrap(VM_MIN_KERNEL_ADDRESS,
	    IOM_END + trunc_page(KBTOB(biosextmem)));

	if (avail_start != PAGE_SIZE)
		pmap_prealloc_lowmem_ptps();

	if (mem_cluster_cnt == 0) {
		/*
		 * Allocate the physical addresses used by RAM from the iomem
		 * extent map.  This is done before the addresses are
		 * page rounded just to make sure we get them all.
		 */
		if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
		/* Cluster 0: conventional memory below the ISA hole. */
		mem_clusters[0].start = 0;
		mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
		physmem += atop(mem_clusters[0].size);
		if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
#if 0
#if NISADMA > 0
		/*
		 * Some motherboards/BIOSes remap the 384K of RAM that would
		 * normally be covered by the ISA hole to the end of memory
		 * so that it can be used.  However, on a 16M system, this
		 * would cause bounce buffers to be allocated and used.
		 * This is not desirable behaviour, as more than 384K of
		 * bounce buffers might be allocated.  As a work-around,
		 * we round memory down to the nearest 1M boundary if
		 * we're using any isadma devices and the remapped memory
		 * is what puts us over 16M.
		 */
		if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
			char pbuf[9];

			format_bytes(pbuf, sizeof(pbuf),
			    biosextmem - (15*1024));
			printf("Warning: ignoring %s of remapped memory\n",
			    pbuf);
			biosextmem = (15*1024);
		}
#endif
#endif
		/* Cluster 1: extended memory above the ISA hole. */
		mem_clusters[1].start = IOM_END;
		mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
		physmem += atop(mem_clusters[1].size);

		mem_cluster_cnt = 2;

		avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
	}

	/*
	 * If we have 16M of RAM or less, just put it all on
	 * the default free list.  Otherwise, put the first
	 * 16M of RAM on a lower priority free list (so that
	 * all of the ISA DMA'able memory won't be eaten up
	 * first-off).
	 */
	if (avail_end <= (16 * 1024 * 1024))
		first16q = VM_FREELIST_DEFAULT;
	else
		first16q = VM_FREELIST_FIRST16;

	/* Make sure the end of the space used by the kernel is rounded. */
	first_avail = round_page(first_avail);
	kern_end = KERNBASE + first_avail;

	/*
	 * Now, load the memory clusters (which have already been
	 * rounded and truncated) into the VM system.
	 *
	 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
	 * IS LOADED AT IOM_END (1M).
	 */
	for (x = 0; x < mem_cluster_cnt; x++) {
		seg_start = mem_clusters[x].start;
		seg_end = mem_clusters[x].start + mem_clusters[x].size;
		seg_start1 = 0;
		seg_end1 = 0;

		/* Clamp everything to the first 4GB of address space. */
		if (seg_start > 0xffffffffULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - seg_start);
			continue;
		}
		if (seg_end > 0x100000000ULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - 0x100000000ULL);
			seg_end = 0x100000000ULL;
		}

		/*
		 * Skip memory before our available starting point.
		 */
		if (seg_end <= avail_start)
			continue;

		if (avail_start >= seg_start && avail_start < seg_end) {
			if (seg_start != 0)
				panic("init_x86_64: memory doesn't start at 0");
			seg_start = avail_start;
			if (seg_start == seg_end)
				continue;
		}

		/*
		 * If this segment contains the kernel, split it
		 * in two, around the kernel.
		 */
		if (seg_start <= IOM_END && first_avail <= seg_end) {
			seg_start1 = first_avail;
			seg_end1 = seg_end;
			seg_end = IOM_END;
		}

		/* First hunk */
		if (seg_start != seg_end) {
			if (seg_start <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				/* Portion below 16MB goes on the
				 * low-priority ISA DMA freelist. */
				if (seg_end > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)tmp,
				    atop(seg_start), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(tmp), atop(seg_start),
				    atop(tmp), first16q);
				seg_start = tmp;
			}

			if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)seg_end,
				    atop(seg_start), atop(seg_end));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(seg_end), atop(seg_start),
				    atop(seg_end), VM_FREELIST_DEFAULT);
			}
		}

		/* Second hunk */
		if (seg_start1 != seg_end1) {
			if (seg_start1 <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end1 > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end1;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)tmp,
				    atop(seg_start1), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(tmp), atop(seg_start1),
				    atop(tmp), first16q);
				seg_start1 = tmp;
			}

			if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)seg_end1,
				    atop(seg_start1), atop(seg_end1));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(seg_end1), atop(seg_start1),
				    atop(seg_end1), VM_FREELIST_DEFAULT);
			}
		}
	}

	/*
	 * Steal memory for the message buffer (at end of core).
	 */
	{
		struct vm_physseg *vps = NULL;
		psize_t sz = round_page(MSGBUFSIZE);
		psize_t reqsz = sz;

		/* Find the physseg that ends at avail_end. */
		for (x = 0; x < vm_nphysseg; x++) {
			vps = &vm_physmem[x];
			if (ptoa(vps->avail_end) == avail_end)
				break;
		}
		if (x == vm_nphysseg)
			panic("init_x86_64: can't find end of memory");

		/* Shrink so it'll fit in the last segment. */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		vps->avail_end -= atop(sz);
		vps->end -= atop(sz);
		msgbuf_paddr = ptoa(vps->avail_end);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end) {
			for (vm_nphysseg--; x < vm_nphysseg; x++)
				vm_physmem[x] = vm_physmem[x + 1];
		}

		/* Now find where the new avail_end is. */
		for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
			if (vm_physmem[x].avail_end > avail_end)
				avail_end = vm_physmem[x].avail_end;
		avail_end = ptoa(avail_end);

		/* Warn if the message buffer had to be shrunk. */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);
	}

	/*
	 * XXXfvdl todo: acpi wakeup code.
	 */

	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);

	/* Map the IDT and low-32-bit trampoline pages. */
	pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
	pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);

	pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_READ|VM_PROT_WRITE);

	idt = (struct gate_descriptor *)idt_vaddr;
	gdtstore = (char *)(idt + NIDT);
	ldtstore = gdtstore + DYNSEL_START;

	/* make gdt gates and memory segments */
	set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 0xfffff,
	    SDT_MEMERA, SEL_KPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 0xfffff,
	    SDT_MEMRWA, SEL_KPL, 1, 0, 1);

	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
	    LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);

	/* make ldt gates and memory segments */
	setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    &IDTVEC(oosyscall), 0, SDT_SYS386CGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	*(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
	*(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);

	/*
	 * 32 bit GDT entries.
	 */
	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * 32 bit LDT entries.
	 */
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1, 0);
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * Other entries.
	 */
	memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));
	memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));

	/* exceptions */
	for (x = 0; x < 32; x++) {
		/* Double fault (vector 8) uses IST slot 1. */
		ist = (x == 8) ? 1 : 0;
		setgate(&idt[x], IDTVEC(exceptions)[x], ist, SDT_SYS386IGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
		idt_allocmap[x] = 1;
	}

	/* new-style interrupt gate for syscalls */
	setgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	idt_allocmap[128] = 1;

	setregion(&region, gdtstore, DYNSEL_START - 1);
	lgdt(&region);

	cpu_init_idt();

#ifdef DDB
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

	intr_default_setup();

	softintr_init();
	splraise(IPL_IPI);
	enable_intr();

	/* Make sure maxproc is sane */
	if (maxproc > cpu_maxproc())
		maxproc = cpu_maxproc();
}
/*
 * gt_bs_extent_init:
 *
 *	Build the extent map that manages a discovery bus space.
 *	With no regions, a degenerate single-address extent is
 *	created (so no address can ever be allocated) and 0 is
 *	returned.  Otherwise the extent spans the lowest region
 *	start to the highest region end, any gaps between regions
 *	are pre-allocated away so they can never be handed out,
 *	and 1 is returned.
 */
int
gt_bs_extent_init(struct discovery_bus_space *bs, char *name)
{
	u_long start, end;
	int i, j, error;

	if (bs->bs_nregion == 0) {
		/*
		 * No usable regions: create a one-address extent at
		 * the top of the address range so every allocation
		 * attempt fails.
		 */
		bs->bs_extent = extent_create(name, 0xffffffffUL,
		    0xffffffffUL, M_DEVBUF, NULL, 0,
		    EX_NOCOALESCE|EX_WAITOK);
		KASSERT(bs->bs_extent != NULL);
		return 0;
	}
	/*
	 * Find the top and bottoms of this bus space.
	 */
	start = bs->bs_regions[0].br_start;
	end = bs->bs_regions[0].br_end;
#ifdef DEBUG
	if (gtpci_debug > 1)
		printf("gtpci_bs_extent_init: %s: region %d: %#lx-%#lx\n",
			name, 0, bs->bs_regions[0].br_start,
			bs->bs_regions[0].br_end);
#endif
	for (i = 1; i < bs->bs_nregion; i++) {
		if (bs->bs_regions[i].br_start < start)
			start = bs->bs_regions[i].br_start;
		if (bs->bs_regions[i].br_end > end)
			end = bs->bs_regions[i].br_end;
#ifdef DEBUG
		if (gtpci_debug > 1)
			printf("gtpci_bs_extent_init: %s: region %d:"
				" %#lx-%#lx\n",
				name, i, bs->bs_regions[i].br_start,
				bs->bs_regions[i].br_end);
#endif
	}
	/*
	 * Now that we have the top and bottom limits of this
	 * bus space, create the extent map that will manage this
	 * space for us.
	 */
#ifdef DEBUG
	if (gtpci_debug > 1)
		printf("gtpci_bs_extent_init: %s: create: %#lx-%#lx\n",
			name, start, end);
#endif
	bs->bs_extent = extent_create(name, start, end, M_DEVBUF,
		NULL, 0, EX_NOCOALESCE|EX_WAITOK);
	KASSERT(bs->bs_extent != NULL);

	/* If there was more than one bus space region, then there
	 * might gaps in between them.  Allocate the gap so that
	 * they will not be legal addresses in the extent.
	 */
	for (i = 0; i < bs->bs_nregion && bs->bs_nregion > 1; i++) {
		/* Initial start is "infinity" and the inital end is
		 * is the end of this bus region.
		 */
		start = ~0UL;
		end = bs->bs_regions[i].br_end;
		/* For each region, if it starts after this region but less
		 * than the saved start, use its start address.  If the start
		 * address is one past the end address, then we're done
		 */
		/* (the loop condition bails out early once an adjacent
		 * region is found, since no gap can exist then) */
		for (j = 0; j < bs->bs_nregion && start > end + 1; j++) {
			if (i == j)
				continue;
			if (bs->bs_regions[j].br_start > end &&
			    bs->bs_regions[j].br_start < start)
				start = bs->bs_regions[j].br_start;
		}
		/*
		 * If we found a gap, allocate it away.
		 */
		if (start != ~0UL && start != end + 1) {
#ifdef DEBUG
			if (gtpci_debug > 1)
				printf("gtpci_bs_extent_init: %s: alloc(hole): %#lx-%#lx\n",
					name, end + 1, start - 1);
#endif
			error = extent_alloc_region(bs->bs_extent,
				end + 1, start - (end + 1), EX_NOWAIT);
			KASSERT(error == 0);
		}
	}
	return 1;
}
/*
 * __BS(map):
 *
 *	Map a range of bus space and return a handle through *hp.
 *	The bus address is first translated to a system physical
 *	address; when extent accounting is enabled (CHIP_EXTENT) and
 *	acct != 0, the range is also reserved in the chip's extent so
 *	overlapping mappings fail with an error.  The handle is a
 *	direct-mapped KSEG/XKPHYS address where the physical address
 *	fits, otherwise (32-bit case only) a wired kernel virtual
 *	mapping is created with pmap_kenter_pa().
 */
static int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	/* acct == 0 means "don't track this range in the extent". */
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("%s: allocating %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
	    __S(__BS(map)), addr, addr + size - 1);
#endif
	error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n", __S(__BS(map)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */

	/* Convert the bus address to a system physical address. */
	addr = mbst.mbst_sys_start + (addr - mbst.mbst_bus_start);

#if defined(__mips_n32) || defined(_LP64)
	/* 64-bit-capable ABIs: use direct-mapped segments. */
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
#ifdef __mips_n32
		/* Prefer KSEG0 when the range fits below 512MB. */
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_CACHED(addr);
	} else if (flags & BUS_SPACE_MAP_PREFETCHABLE) {
		*hp = MIPS_PHYS_TO_XKPHYS_ACC(addr);
	} else {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_UNCACHED(addr);
	}
#else
	/*
	 * 32-bit ABI: addresses beyond the direct-mapped window need
	 * a wired kernel virtual mapping.
	 */
	if (((addr + size) & ~MIPS_PHYS_MASK) != 0) {
		vaddr_t va;
		paddr_t pa;
		int s;

		size = round_page((addr % PAGE_SIZE) + size);
		va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
		    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
		if (va == 0)
			return ENOMEM;

		/* check use of handle_is_km in BS(unmap) */
		KASSERT(!(MIPS_KSEG0_P(va) || MIPS_KSEG1_P(va)));

		*hp = va + (addr & PAGE_MASK);
		pa = trunc_page(addr);

		s = splhigh();
		while (size != 0) {
			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
			pa += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pmap_update(pmap_kernel());
		splx(s);
	} else {
		if (flags & BUS_SPACE_MAP_CACHEABLE)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
	}
#endif

	return (0);
}
/*
 * pci_addr_fixup:
 *
 *	NetBSD variant: validate BIOS-assigned PCI resources on buses
 *	up to maxbus and re-allocate the bogus ones.  Stage order is
 *	significant: (1) reserve existing BIOS assignments, (2) reserve
 *	fixed non-PCI regions, (3) choose allocation start addresses
 *	above physical RAM / ISA reservations, (4) if any bogus devices
 *	were found, allocate fresh resources for them.  Returns early
 *	when nothing needs fixing.
 */
void
pci_addr_fixup(pci_chipset_tag_t pc, int maxbus)
{
	extern paddr_t avail_end;
	const char *verbose_header =
		"[%s]-----------------------\n"
		" device vendor product\n"
		" register space address size\n"
		"--------------------------------------------\n";
	const char *verbose_footer =
		"--------------------------[%3d devices bogus]\n";
	/* Fixed address ranges that must never be handed to a PCI BAR. */
	const struct {
		bus_addr_t start;
		bus_size_t size;
		const char *name;
	} system_reserve [] = {
		{ 0xfec00000, 0x100000, "I/O APIC" },
		{ 0xfee00000, 0x100000, "Local APIC" },
		{ 0xfffe0000, 0x20000, "BIOS PROM" },
		{ 0, 0, 0 }, /* terminator */
	}, *srp;
	paddr_t start;
	int error;

	/* Extents tracking PCI memory and I/O port space. */
	pciaddr.extent_mem = extent_create("PCI I/O memory space",
	    PCIADDR_MEM_START, PCIADDR_MEM_END,
	    0, 0, EX_NOWAIT);
	KASSERT(pciaddr.extent_mem);
	pciaddr.extent_port = extent_create("PCI I/O port space",
	    PCIADDR_PORT_START, PCIADDR_PORT_END,
	    0, 0, EX_NOWAIT);
	KASSERT(pciaddr.extent_port);

	/*
	 * 1. check & reserve system BIOS setting.
	 */
	aprint_debug(verbose_header, "System BIOS Setting");
	pci_device_foreach(pc, maxbus, pciaddr_resource_reserve, NULL);
	aprint_debug(verbose_footer, pciaddr.nbogus);

	/*
	 * 2. reserve non-PCI area.
	 */
	for (srp = system_reserve; srp->size; srp++) {
		error = extent_alloc_region(pciaddr.extent_mem, srp->start,
		    srp->size,
		    EX_NOWAIT| EX_MALLOCOK);
		if (error != 0) {
			aprint_error("WARNING: can't reserve area for %s.\n",
			       srp->name);
		}
	}

	/*
	 * 3. determine allocation space
	 */
	/* Start allocating above physical RAM and the ISA hole. */
	start = x86_round_page(avail_end + 1);
	if (start < PCIADDR_ISAMEM_RESERVE)
		start = PCIADDR_ISAMEM_RESERVE;
	/* Round up to the next 1MB boundary. */
	pciaddr.mem_alloc_start = (start + 0x100000 + 1) & ~(0x100000 - 1);
	pciaddr.port_alloc_start = PCIADDR_ISAPORT_RESERVE;
	aprint_debug(" Physical memory end: 0x%08x\n PCI memory mapped I/O "
	    "space start: 0x%08x\n", (unsigned)avail_end,
	    (unsigned)pciaddr.mem_alloc_start);

	if (pciaddr.nbogus == 0)
		return; /* no need to fixup */

	/*
	 * 4. do fixup
	 */
	aprint_debug(verbose_header, "PCIBIOS fixup stage");
	pciaddr.nbogus = 0;
	pci_device_foreach_min(pc, 0, maxbus, pciaddr_resource_allocate, NULL);
	aprint_debug(verbose_footer, pciaddr.nbogus);
}