/*
 * Compute dumplo and dumpsize for crash dumps on dumpdev.
 * Bails out if there is no dump device, its size cannot be queried,
 * or it is no larger than one page worth of disk blocks.
 */
void
dumpconf(void)
{
	int nblks;	/* size of the dump device, in disk blocks */

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	/* Start from "dump all of physical memory". */
	dumpsize = physmem;

	/* Clip the dump to the space available past dumplo. */
	if (dumpsize > atop(dbtob(nblks - dumplo)))
		dumpsize = atop(dbtob(nblks - dumplo));
	else if (dumplo == 0)
		dumplo = nblks - btodb(ptoa(dumpsize));

	/*
	 * Don't dump on the first block in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(PAGE_SIZE))
		dumplo = btodb(PAGE_SIZE);

	/* Put dump at the end of partition, and make it fit. */
	if (dumpsize + 1 > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo) - 1;
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;

	/* memory is contiguous on vax */
	cpu_kcore_hdr.ram_segs[0].start = 0;
	cpu_kcore_hdr.ram_segs[0].size = ptoa(physmem);
	cpu_kcore_hdr.sysmap = (vaddr_t)Sysmap;
}
/*
 * cpu_startup: boot-time machine announcement.  Starts the decrementer
 * clock, reports real/available memory and KVA, initializes the kernel
 * submaps and the buffer cache.  "dummy" is unused.
 */
static void
cpu_startup(void *dummy)
{
	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zu (%zu MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

			/* 64-bit platforms get wider, signed-formatted output. */
#ifdef __powerpc64__
			printf("0x%016jx - 0x%016jx, %jd bytes (%jd pages)\n",
#else
			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
#endif
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}
/* * Prevent loading a kernel if it would overlap the SRM. */ int check_phdr(void *hdr) { Elf64_Phdr *phdr = (Elf64_Phdr *)hdr; struct rpb *hwrpb = (struct rpb *)HWRPB_ADDR; struct mddt *mddtp; struct mddt_cluster *memc; u_int64_t cstart, cend; u_int64_t i; mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off); for (i = 0; i < mddtp->mddt_cluster_cnt; i++) { memc = &mddtp->mddt_clusters[i]; if (memc->mddt_usage & MDDT_PALCODE) { cstart = ALPHA_PHYS_TO_K0SEG(ptoa(memc->mddt_pfn)); cend = cstart + ptoa(memc->mddt_pg_cnt); if (phdr->p_vaddr + phdr->p_memsz <= cstart || phdr->p_vaddr >= cend) continue; printf("SRM console and kernel image would overlap.\n" "Please report this to <*****@*****.**>, " "with the following values:\n" "SRM range: %p-%p\n" "kernel range: %p-%p\n", cstart, cend, phdr->p_vaddr, phdr->p_vaddr + phdr->p_memsz); return 1; } } return 0; }
/*
 * sh_startup: announce the machine, report exception-handler sizes
 * (DEBUG only), allocate the exec and physio submaps, initialize the
 * buffer cache, and optionally enter user_config (boot -c).
 */
void
sh_startup()
{
	vaddr_t minaddr, maxaddr;

	printf("%s", version);
	if (*cpu_model != '\0')
		printf("%s\n", cpu_model);
#ifdef DEBUG
	/* Report the sizes of the copied-down exception vectors. */
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    /* Dual-CPU kernel: pick the vector for the CPU we booted on. */
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
/*
 * buf_map: give "bp" a kernel virtual mapping for its pages.
 * Must run at IPL_BIO (asserted).  May sleep waiting for KVA when the
 * pre-allocated window is exhausted and no mapped buffer can be stolen.
 */
void
buf_map(struct buf *bp)
{
	vaddr_t va;

	splassert(IPL_BIO);

	if (bp->b_data == NULL) {
		unsigned long i;

		/*
		 * First, just use the pre-allocated space until we run out.
		 */
		if (buf_kva_start < buf_kva_end) {
			va = buf_kva_start;
			buf_kva_start += MAXPHYS;
			bcstats.kvaslots_avail--;
		} else {
			struct buf *vbp;

			/*
			 * Find some buffer we can steal the space from.
			 */
			while ((vbp = TAILQ_FIRST(&buf_valist)) == NULL) {
				/* No victim yet: record the wait and sleep. */
				buf_needva++;
				buf_nkvmsleep++;
				tsleep(&buf_needva, PRIBIO, "buf_needva", 0);
			}
			va = buf_unmap(vbp);
		}

		/* Enter a mapping for every page backing the buffer. */
		mtx_enter(&bp->b_pobj->vmobjlock);
		for (i = 0; i < atop(bp->b_bufsize); i++) {
			struct vm_page *pg = uvm_pagelookup(bp->b_pobj,
			    bp->b_poffs + ptoa(i));

			KASSERT(pg != NULL);

			pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		mtx_leave(&bp->b_pobj->vmobjlock);
		pmap_update(pmap_kernel());
		bp->b_data = (caddr_t)va;
	} else {
		/* Already mapped: just take it off the reusable-KVA list. */
		TAILQ_REMOVE(&buf_valist, bp, b_valist);
		bcstats.kvaslots_avail--;
	}

	bcstats.busymapped++;

	CLR(bp->b_flags, B_NOTMAPPED);
}
void cpu_startup() { vaddr_t minaddr, maxaddr; /* * Good {morning,afternoon,evening,night}. */ printf(version); printf("real mem = %lu (%luMB)\n", ptoa(physmem), ptoa(physmem)/1024/1024); /* * Grab machine dependent memory spaces */ platform->startup(); /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. */ minaddr = vm_map_min(kernel_map); exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); /* * Allocate map for physio. */ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, 0, FALSE, NULL); printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); /* * Configure the system. */ if (boothowto & RB_CONFIG) { #ifdef BOOT_CONFIG user_config(); #else printf("kernel does not support -c; continuing..\n"); #endif } }
/* * Dynamically set the start address for rbus. This must be called * before rbus is initialized. The start address should be determined * by the amount of installed memory. Generally 1 GB has been found * to be a good value, but it fails on some Thinkpads (e.g. 2645-4AU), * for which 0.5 GB is a good value. It also fails on (at least) * Thinkpads with 2GB of RAM, for which 2 GB is a good value. * * Thus, a general strategy of setting rbus_min_start to the amount of * memory seems in order. However, the actual amount of memory is * generally slightly more than the amount found, e.g. 1014MB vs 1024, * or 2046 vs 2048. */ bus_addr_t rbus_min_start_hint(void) { bus_addr_t rbus_min_start = RBUS_MIN_START; size_t ram = ptoa(physmem); if (ram <= 192 * 1024 * 1024UL) { /* * <= 192 MB, so try 0.5 GB. This will work on * Thinkpad 600E (2645-4AU), which fails at 1 GB, and * on some other older machines that may have trouble * with addresses needing more than 20 bits. */ rbus_min_start = 512 * 1024 * 1024UL; } if (ram >= 1024 * 1024 * 1024UL) { /* * > 1 GB, so try 2 GB. */ rbus_min_start = 2 * 1024 * 1024 * 1024UL; } /* Not tested in > 2 GB case. */ if (ram > 2 * 1024 * 1024 * 1024UL) { /* * > 2 GB, so try 3 GB. */ rbus_min_start = 3 * 1024 * 1024 * 1024UL; } return (rbus_min_start); }
/*
 * buf_free_pages: release the physical pages backing "bp" to UVM.
 * The buffer must already be unmapped (b_data == NULL) and must have
 * a backing object.
 */
void
buf_free_pages(struct buf *bp)
{
	struct uvm_object *uobj = bp->b_pobj;
	struct vm_page *pg;
	voff_t off, i;
	int s;

	KASSERT(bp->b_data == NULL);
	KASSERT(uobj != NULL);

	s = splbio();

	off = bp->b_poffs;
	/* Detach the buffer from its object before freeing the pages. */
	bp->b_pobj = NULL;
	bp->b_poffs = 0;

	for (i = 0; i < atop(bp->b_bufsize); i++) {
		pg = uvm_pagelookup(uobj, off + ptoa(i));
		KASSERT(pg != NULL);
		KASSERT(pg->wire_count == 1);
		/*
		 * Presumably cleared so uvm_pagefree() sees the page as
		 * unwired and actually releases it — confirm against
		 * uvm_pagefree()'s wire_count handling.
		 */
		pg->wire_count = 0;
		uvm_pagefree(pg);
		bcstats.numbufpages--;
	}
	splx(s);
}
/*
 * vm_mem_bootstrap: bring up the VM subsystem at boot.  The call order
 * below is significant: resident-page accounting comes first, then the
 * allocators and pmap that depend on it.
 */
void
vm_mem_bootstrap(void)
{
	vm_offset_t start, end;

	/*
	 * Initializes resident memory structures.
	 * From here on, all physical memory is accounted for,
	 * and we use only virtual addresses.
	 */
	vm_page_bootstrap(&start, &end);

	/*
	 * Initialize other VM packages
	 */
	zone_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
	/* Size the zone system from the pages still free after bootstrap. */
	zone_init((vm_size_t)ptoa(vm_page_free_count));
	kalloc_init();
#if MACH_RT
	rtalloc_init();
#endif /* MACH_RT */
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}
int drm_mmap(struct dev_mmap_args *ap) { struct cdev *kdev = ap->a_head.a_dev; vm_offset_t offset = ap->a_offset; struct drm_device *dev = drm_get_device_from_kdev(kdev); struct drm_file *file_priv = NULL; drm_local_map_t *map; enum drm_map_type type; vm_paddr_t phys; DRM_LOCK(); file_priv = drm_find_file_by_proc(dev, DRM_CURPROC); DRM_UNLOCK(); if (file_priv == NULL) { DRM_ERROR("can't find authenticator\n"); return EINVAL; } if (!file_priv->authenticated) return EACCES; if (dev->dma && offset < ptoa(dev->dma->page_count)) { drm_device_dma_t *dma = dev->dma; DRM_SPINLOCK(&dev->dma_lock); if (dma->pagelist != NULL) { unsigned long page = offset >> PAGE_SHIFT; unsigned long phys = dma->pagelist[page]; ap->a_result = atop(phys); DRM_SPINUNLOCK(&dev->dma_lock); return 0; } else {
/*
 * This is called by configure to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
dumpconf(void)
{
	int nblks;	/* size of dump area */

	/* Bail if there is no dump device or its size cannot be queried. */
	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	/* Default to dumping all of physical memory. */
	dumpsize = physmem;

	/* aviion only uses a single segment. */
	cpu_kcore_hdr.ram_segs[0].start = 0;
	cpu_kcore_hdr.ram_segs[0].size = ptoa(physmem);
	cpu_kcore_hdr.cputype = cputyp;

	/*
	 * Don't dump on the first block
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize + 1 > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo) - 1;
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;
}
/*
 * buf_free_pages: release the physical pages backing "bp" to UVM,
 * holding the object's vmobjlock across the per-page work.  The buffer
 * must already be unmapped (b_data == NULL) and have a backing object.
 */
void
buf_free_pages(struct buf *bp)
{
	struct uvm_object *uobj = bp->b_pobj;
	struct vm_page *pg;
	voff_t off, i;
	int s;

	KASSERT(bp->b_data == NULL);
	KASSERT(uobj != NULL);

	s = splbio();

	off = bp->b_poffs;
	/* Detach the buffer from its object before freeing the pages. */
	bp->b_pobj = NULL;
	bp->b_poffs = 0;

	mtx_enter(&uobj->vmobjlock);
	for (i = 0; i < atop(bp->b_bufsize); i++) {
		pg = uvm_pagelookup(uobj, off + ptoa(i));
		KASSERT(pg != NULL);
		KASSERT(pg->wire_count == 1);
		/*
		 * Presumably cleared so uvm_pagefree() sees the page as
		 * unwired and actually releases it — confirm against
		 * uvm_pagefree()'s wire_count handling.
		 */
		pg->wire_count = 0;
		/* Never on a pageq, no pageqlock needed.  */
		uvm_pagefree(pg);
		bcstats.numbufpages--;
	}
	mtx_leave(&uobj->vmobjlock);
	splx(s);
}
paddr_t drmmmap(dev_t kdev, off_t offset, int prot) { struct drm_device *dev = drm_get_device_from_kdev(kdev); drm_local_map_t *map; struct drm_file *priv; drm_map_type_t type; paddr_t phys; DRM_LOCK(); priv = drm_find_file_by_minor(dev, minor(kdev)); DRM_UNLOCK(); if (priv == NULL) { DRM_ERROR("can't find authenticator\n"); return (EINVAL); } if (!priv->authenticated) return (EACCES); if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) { drm_device_dma_t *dma = dev->dma; DRM_SPINLOCK(&dev->dma_lock); if (dma->pagelist != NULL) { unsigned long page = offset >> PAGE_SHIFT; unsigned long phys = dma->pagelist[page]; DRM_SPINUNLOCK(&dev->dma_lock); return (atop(phys)); } else {
paddr_t drm_mmap(dev_t kdev, off_t offset, int prot) { DRM_DEVICE; drm_local_map_t *map; drm_file_t *priv; drm_map_type_t type; paddr_t phys; uintptr_t roffset; DRM_LOCK(); priv = drm_find_file_by_proc(dev, DRM_CURPROC); DRM_UNLOCK(); if (priv == NULL) { DRM_ERROR("can't find authenticator\n"); return -1; } if (!priv->authenticated) return -1; if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) { drm_device_dma_t *dma = dev->dma; DRM_SPINLOCK(&dev->dma_lock); if (dma->pagelist != NULL) { unsigned long page = offset >> PAGE_SHIFT; unsigned long pphys = dma->pagelist[page]; #ifdef macppc return pphys; #else return atop(pphys); #endif } else {
/*
 * Allocate a uarea as one physically contiguous chunk so it can be
 * accessed through the direct map.  Returns the direct-mapped virtual
 * address, or NULL if no contiguous chunk is available.
 */
void *
cpu_uarea_alloc(bool system)
{
	struct pglist plist;
	const struct vm_page *first;

	/* Grab USPACE bytes of physically contiguous pages. */
	if (uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &plist, 1, 1) != 0)
		return NULL;

	/* The chunk's physical address is that of its first page. */
	first = TAILQ_FIRST(&plist);
	KASSERT(first != NULL);

	/* Hand back a direct-mapped VA for that physical address. */
	return (void *)PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(first));
}
/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We basically just km_valloc a blank map entry to reserve the space in the
 * kernel map and then use pmap_enter() to put the mappings in by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

	prot = VM_PROT_READ;
	/*
	 * NOTE(review): MAPIN_READ adds WRITE protection — presumably
	 * because a pager "read" means the device writes into these pages;
	 * confirm against the UVMPAGER_MAPIN_* definitions.
	 */
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

	size = ptoa(npages);
	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0)
		return 0;

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			/* Undo the partial mapping and release the segment. */
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	return kva;
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	/* Silence pmap debug output during startup. */
	pmapdebug = 0;
#endif

	cpu_setmodel("FIC8234");
	if (fputype != FPU_NONE)
		m68k_make_fpu_idle_frame();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	printf("real mem = %d\n", ctob(physmem));

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

#ifdef DEBUG
	/* Restore the caller's pmap debug setting. */
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %ld\n", ptoa(uvmexp.free));
}
/* * Allocate memory for variable-sized tables, */ void cpu_startup(void) { vaddr_t minaddr, maxaddr; char pbuf[9]; /* * Good {morning,afternoon,evening,night}. */ printf("%s%s", copyright, version); format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); printf("total memory = %s\n", pbuf); minaddr = 0; /* * Allocate a submap for physio. */ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, 0, false, NULL); /* * (No need to allocate an mbuf cluster submap. Mbuf clusters * are allocated via the pool allocator, and we use KSEG to * map those pages.) */ format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); printf("avail memory = %s\n", pbuf); }
/*
 * dumpconf: compute dumplo and dumpsize for crash dumps on dumpdev,
 * and record each DRAM block in the kcore header's segment table.
 */
void
dumpconf(void)
{
	int nblks, block;

	/* Bail if there is no dump device or its size cannot be queried. */
	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	/* Default to dumping all of physical memory. */
	dumpsize = physmem;

	/* Always skip the first CLBYTES, in case there is a label there. */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize + 1 > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo) - 1;
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;

	/* Describe every DRAM block for the kernel core header. */
	for (block = 0; block < bootconfig.dramblocks; block++) {
		cpu_kcore_hdr.ram_segs[block].start =
		    bootconfig.dram[block].address;
		cpu_kcore_hdr.ram_segs[block].size =
		    ptoa(bootconfig.dram[block].pages);
	}
}
/*
 * mmmmap: mmap entry point for the memory devices.  Minor 0 is
 * physical memory; minor 4 (APERTURE kernels) is the aperture driver.
 * Returns the physical byte offset to map, or -1 to refuse.
 */
paddr_t
mmmmap(dev_t dev, off_t off, int prot)
{
	struct proc *p = curproc;	/* XXX */

	switch (minor(dev)) {
	/* minor device 0 is physical memory */
	case 0:
		/*
		 * Fix: the bounds check was off by one ("> ptoa(physmem)"),
		 * which let an unprivileged caller map the page starting
		 * exactly at the end of RAM; the valid range is
		 * [0, ptoa(physmem)), so use >=.
		 * NOTE(review): the (u_int) cast truncates a large off_t —
		 * confirm callers cannot pass offsets above 4 GB here.
		 */
		if ((u_int)off >= ptoa(physmem) && suser(p) != 0)
			return -1;
		return off;

#ifdef APERTURE
	/* minor device 4 is aperture driver */
	case 4:
		/* Check if a write combining mapping is requested. */
		if (off >= MEMRANGE_WC_RANGE)
			off = (off - MEMRANGE_WC_RANGE) | PMAP_WC;

		switch (allowaperture) {
		case 1:
			/* Allow mapping of the VGA framebuffer & BIOS only */
			if ((off >= VGA_START && off <= BIOS_END) ||
			    (unsigned)off > (unsigned)ptoa(physmem))
				return off;
			else
				return -1;
		case 2:
		case 3:
			/* Allow mapping of the whole 1st megabyte for x86emu */
			if (off <= BIOS_END ||
			    (unsigned)off > (unsigned)ptoa(physmem))
				return off;
			else
				return -1;
		default:
			return -1;
		}
#endif
	default:
		return -1;
	}
}
/*
 * Report backing-store statistics for the default pager: total and
 * free space in bytes, plus the VM page size.  Only the default pager
 * port may be queried; anything else is rejected.
 */
kern_return_t
default_pager_info(
	mach_port_t		pager,
	default_pager_info_t	*infop)
{
	vm_size_t total_pages, free_pages;

	if (pager != default_pager_default_port)
		return KERN_INVALID_ARGUMENT;

	/* Page counts from the backing-store layer, converted to bytes. */
	bs_global_info(&total_pages, &free_pages);

	infop->dpi_total_space = ptoa(total_pages);
	infop->dpi_free_space = ptoa(free_pages);
	infop->dpi_page_size = vm_page_size;

	return KERN_SUCCESS;
}
/* * Fill the machine-dependent dump header. */ void cpu_init_kcore_hdr() { extern cpu_kcore_hdr_t cpu_kcore_hdr; cpu_kcore_hdr_t *h = &cpu_kcore_hdr; phys_ram_seg_t *seg = cpu_kcore_hdr.kcore_segs; struct vm_physseg *physseg = vm_physmem; u_int i; bzero(h, sizeof(*h)); h->kcore_nsegs = min(NPHYS_RAM_SEGS, (u_int)vm_nphysseg); for (i = h->kcore_nsegs; i != 0; i--) { seg->start = ptoa(physseg->start); seg->size = (psize_t)ptoa(physseg->end - physseg->start); seg++; physseg++; } }
void cpu_startup(void) { extern int physmem; extern struct vm_map *mb_map; vaddr_t minaddr, maxaddr; char pbuf[9]; printf("%s%s", copyright, version); format_bytes(pbuf, sizeof(pbuf), ptoa(physmem)); printf("total memory = %s\n", pbuf); minaddr = 0; mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, nmbclusters * mclbytes, VM_MAP_INTRSAFE, false, NULL); format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); printf("avail memory = %s\n", pbuf); }
/* * Initialize the external I/O extent map. */ void iomap_init(void) { /* extiobase is initialized by pmap_bootstrap(). */ extio_ex = extent_create("extio", (u_long) extiobase, (u_long) extiobase + (ptoa(EIOMAPSIZE) - 1), M_DEVBUF, (void *) extio_ex_storage, sizeof(extio_ex_storage), EX_NOCOALESCE|EX_NOWAIT); }
/*
 * pmap_bootstrap_md: machine-dependent bootstrap step.  If there is
 * room below 16MB, carve out and map a contiguous ethernet buffer,
 * advancing the virtual and physical allocation pointers past it.
 * Returns the (possibly advanced) virtual address.
 */
vaddr_t
pmap_bootstrap_md(vaddr_t vaddr)
{
	/*
	 * Get ethernet buffer - need ETHERPAGES pages physically contiguous
	 * below 16MB.
	 */
	if (vaddr < 0x01000000 - ptoa(ETHERPAGES)) {
		etherlen = ptoa(ETHERPAGES);
		etherbuf = (void *)vaddr;

		/* Map the buffer cache-inhibited (CACHE_INH). */
		vaddr = pmap_map(vaddr, avail_start, avail_start + etherlen,
		    UVM_PROT_RW, CACHE_INH);

		/* Consume the space from both address pools. */
		virtual_avail += etherlen;
		avail_start += etherlen;
	}

	return vaddr;
}
/*
 * cpu_startup: report memory at boot, set up the kernel submaps,
 * interrupts, and the buffer cache.  "dummy" is unused (presumably a
 * SYSINIT hook signature — confirm at the registration site).
 */
static void
cpu_startup(void *dummy)
{
	if (boothowto & RB_VERBOSE)
		bootverbose++;

	printf("real memory = %ju (%juK bytes)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / 1024);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08jx - 0x%08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%juMB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	cpu_init_interrupts();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}
/*
 * cpu_startup_common: shared MIPS startup.  Attaches TLB event
 * counters, creates the per-CPU state kcpusets (MULTIPROCESSOR),
 * prints the banner and memory totals, and allocates the physio submap.
 */
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	/* Track hatched/halted/paused/resumed/running CPUs. */
	kcpuset_create(&cpus_halted, true);
	KASSERT(cpus_halted != NULL);
	kcpuset_create(&cpus_hatched, true);
	KASSERT(cpus_hatched != NULL);
	kcpuset_create(&cpus_paused, true);
	KASSERT(cpus_paused != NULL);
	kcpuset_create(&cpus_resumed, true);
	KASSERT(cpus_resumed != NULL);
	kcpuset_create(&cpus_running, true);
	KASSERT(cpus_running != NULL);

	/* The boot CPU is, by definition, hatched and running. */
	kcpuset_set(cpus_hatched, cpu_number());
	kcpuset_set(cpus_running, cpu_number());
#endif

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#if defined(__mips_n32)
	module_machine = "mips-n32";
#endif
}
/*
 * cpu_booke_startup: Book-E boot-time startup.  Starts the decrementer
 * clock, reports real/available memory, initializes the kernel submaps
 * and buffer cache, and enables NX stacks.  "dummy" is unused.
 */
static void
cpu_booke_startup(void *dummy)
{
	int indx;
	unsigned long size;

	/* Initialise the decrementer-based clock. */
	decr_init();

	/* Good {morning,afternoon,evening,night}. */
	cpu_setup(PCPU_GET(cpuid));

	/*
	 * NOTE(review): the format mixes %lu and %ld for the same
	 * ptoa(physmem) expression — confirm its type is (unsigned) long
	 * here, otherwise the varargs do not match.
	 */
	printf("real memory = %lu (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	/* Display any holes after the first chunk of extended memory. */
	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			size = phys_avail[indx + 1] - phys_avail[indx];

			/*
			 * NOTE(review): phys_avail entries are printed with
			 * %08x — confirm vm_paddr_t is 32-bit on this
			 * platform (e500 cores can have 36-bit physical
			 * addresses), otherwise these arguments mismatch.
			 */
			printf("0x%08x - 0x%08x, %lu bytes (%lu pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1,
			    size, size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %lu (%ld MB)\n", ptoa(vm_cnt.v_free_count),
	    ptoa(vm_cnt.v_free_count) / 1048576);

	/* Set up buffers, so they can be used to read disk labels. */
	bufinit();
	vm_pager_bufferinit();

	/* Cpu supports execution permissions on the pages. */
	elf32_nxstack = 1;
}
/*
 * Look up the process identified by the Python "pid" argument and
 * return its resident set size (pages converted to bytes via ptoa)
 * and virtual size (ki_size) as a Python (rss, vms) tuple.
 * Returns NULL with a Python exception set on argument or lookup
 * failure.
 */
static PyObject *
get_memory_info(PyObject *self, PyObject *args)
{
	long pid;
	struct kinfo_proc kp;

	/* Both failure paths leave the Python error already set. */
	if (!PyArg_ParseTuple(args, "l", &pid) ||
	    get_kinfo_proc(pid, &kp) == -1)
		return NULL;

	return Py_BuildValue("(ll)", ptoa(kp.ki_rssize), (long)kp.ki_size);
}
/* * cpu_startup: allocate memory for variable-sized tables, initialize CPU, and * do auto-configuration. */ void cpu_startup() { vaddr_t minaddr, maxaddr; #ifdef PMAPDEBUG extern int pmapdebug; int opmapdebug = pmapdebug; pmapdebug = 0; /* Shut up pmap debug during bootstrap. */ #endif /* * Good {morning,afternoon,evening,night}. */ printf(version); printf("real mem = %lu (%luMB)\n", ptoa((psize_t)physmem), ptoa((psize_t)physmem)/1024/1024); printf("rsvd mem = %lu (%luMB)\n", ptoa((psize_t)rsvdmem), ptoa((psize_t)rsvdmem)/1024/1024); /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. */ minaddr = vm_map_min(kernel_map); exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); /* Allocate a submap for physio. */ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, 0, FALSE, NULL); #ifdef PMAPDEBUG pmapdebug = opmapdebug; #endif printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free), ptoa((psize_t)uvmexp.free)/1024/1024); /* * Set up CPU-specific registers, cache, etc. */ initcpu(); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); /* * Configure the system. */ if (boothowto & RB_CONFIG) { #ifdef BOOT_CONFIG user_config(); #else printf("kernel does not support -c; continuing..\n"); #endif } }