/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff)
{
	vm_object_t object;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);

	if (handle != NULL) {
		mtx_lock(&Giant);
		/*
		 * Lock to prevent object creation race condition.
		 */
		while (phys_pager_alloc_lock) {
			phys_pager_alloc_lock = -1;
			tsleep(&phys_pager_alloc_lock, PVM, "swpalc", 0);
		}
		phys_pager_alloc_lock = 1;

		/*
		 * Look up pager, creating as necessary.
		 */
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			object = vm_object_allocate(OBJT_PHYS,
			    OFF_TO_IDX(foff + size));
			object->handle = handle;
			mtx_lock(&phys_pager_mtx);
			TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
			    pager_object_list);
			mtx_unlock(&phys_pager_mtx);
		} else {
			/*
			 * Gain a reference to the object.
			 */
			vm_object_reference(object);
			if (OFF_TO_IDX(foff + size) > object->size)
				object->size = OFF_TO_IDX(foff + size);
		}
		if (phys_pager_alloc_lock == -1)
			wakeup(&phys_pager_alloc_lock);
		phys_pager_alloc_lock = 0;
		mtx_unlock(&Giant);
	} else {
		object = vm_object_allocate(OBJT_PHYS,
		    OFF_TO_IDX(foff + size));
	}
	return (object);
}
/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with other thread while
				 * allocating object.
				 */
				if (pindex > object->size)
					object->size = pindex;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				TAILQ_INSERT_TAIL(&phys_pager_object_list,
				    object, pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
	}
	return (object);
}
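/*
 * Illustrative sketch (not taken from any of the sources in this section)
 * of the race-handling idiom phys_pager_alloc() above uses: allocate a
 * candidate object with the list mutex dropped, repeat the lookup after
 * relocking, and discard the candidate if another thread published one
 * first.  This is a user-space analogue with a pthread mutex; the names
 * (struct obj, lookup_or_create) are hypothetical and for illustration only.
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	void		*handle;
	struct obj	*next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table;		/* list of published objects */

static struct obj *
lookup_locked(void *handle)
{
	struct obj *o;

	for (o = table; o != NULL; o = o->next)
		if (o->handle == handle)
			return (o);
	return (NULL);
}

struct obj *
lookup_or_create(void *handle)
{
	struct obj *o, *o1;

	o1 = NULL;
	pthread_mutex_lock(&table_lock);
	o = lookup_locked(handle);
	if (o == NULL) {
		/* Allocation may sleep; do it without holding the lock. */
		pthread_mutex_unlock(&table_lock);
		o1 = calloc(1, sizeof(*o1));
		if (o1 == NULL)
			return (NULL);
		pthread_mutex_lock(&table_lock);
		o = lookup_locked(handle);
		if (o == NULL) {
			/* We won the race: publish our candidate. */
			o = o1;
			o1 = NULL;
			o->handle = handle;
			o->next = table;
			table = o;
		}
	}
	pthread_mutex_unlock(&table_lock);
	free(o1);			/* lost the race: discard candidate */
	return (o);
}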
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}
/*
 * no_pager_alloc just returns an initialized object.
 */
static vm_object_t
default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	if (handle != NULL)
		panic("default_pager_alloc: handle specified");

	return vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(round_page(offset + size)));
}
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous, int rcNoMem)
{
    uint32_t   cPages = atop(cb);
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
                                              uAlignment, fContiguous, true, rcNoMem);
    if (RT_SUCCESS(rc))
    {
        if (fContiguous)
        {
            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }
    else
    {
        vm_object_deallocate(pMemFreeBSD->pObject);
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    }

    return rc;
}
static vm_object_t
sg_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	struct sglist *sg;
	vm_object_t object;
	vm_pindex_t npages, pindex;
	int i;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	/*
	 * The scatter/gather list must only include page-aligned
	 * ranges.
	 */
	npages = 0;
	sg = handle;
	for (i = 0; i < sg->sg_nseg; i++) {
		if ((sg->sg_segs[i].ss_paddr % PAGE_SIZE) != 0 ||
		    (sg->sg_segs[i].ss_len % PAGE_SIZE) != 0)
			return (NULL);
		npages += sg->sg_segs[i].ss_len / PAGE_SIZE;
	}

	/*
	 * The scatter/gather list has a fixed size.  Refuse requests
	 * to map beyond that.
	 */
	size = round_page(size);
	pindex = OFF_TO_IDX(foff) + OFF_TO_IDX(size);
	if (pindex > npages || pindex < OFF_TO_IDX(foff) ||
	    pindex < OFF_TO_IDX(size))
		return (NULL);

	/*
	 * Allocate a new object and associate it with the
	 * scatter/gather list.  It is ok for our purposes to have
	 * multiple VM objects associated with the same scatter/gather
	 * list because scatter/gather lists are static.  This is also
	 * simpler than ensuring a unique object per scatter/gather
	 * list.
	 */
	object = vm_object_allocate(OBJT_SG, npages);
	object->handle = sglist_hold(sg);
	TAILQ_INIT(&object->un_pager.sgp.sgp_pglist);
	return (object);
}
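/*
 * Hypothetical caller sketch (FreeBSD kernel, not part of the source
 * above): build a one-segment, page-aligned scatter/gather list and hand
 * it to the SG pager through vm_pager_allocate(), which reaches
 * sg_pager_alloc() above.  The function name, physical address, and
 * length below are placeholders only.
 */
static vm_object_t
example_sg_object(vm_paddr_t pa, size_t len, struct ucred *cred)
{
	struct sglist *sg;
	vm_object_t obj;

	sg = sglist_alloc(1, M_WAITOK);
	if (sglist_append_phys(sg, trunc_page(pa), round_page(len)) != 0) {
		sglist_free(sg);
		return (NULL);
	}
	obj = vm_pager_allocate(OBJT_SG, sg, round_page(len), VM_PROT_ALL,
	    0, cred);
	/* sg_pager_alloc() took its own reference via sglist_hold(). */
	sglist_free(sg);
	return (obj);
}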
static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
                                        vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
{
    vm_offset_t MapAddress = vm_map_min(kernel_map);
    size_t      cPages = atop(pMemFreeBSD->Core.cb);
    int         rc;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);

    /* No additional object reference for auto-deallocation upon unmapping. */
#if __FreeBSD_version >= 1000055
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#else
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#endif

    if (rc == KERN_SUCCESS)
    {
        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
                                              false, rcNoMem);
        if (RT_SUCCESS(rc))
        {
            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            return VINF_SUCCESS;
        }

        vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
    }
    else
    {
        rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
        vm_object_deallocate(pMemFreeBSD->pObject);
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}
/*
 * no_pager_alloc just returns an initialized object.
 */
static vm_object_t
default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL)
		panic("default_pager_alloc: handle specified");

	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (NULL);
		crhold(cred);
	}
	object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(round_page(offset + size)));
	if (cred != NULL) {
		VM_OBJECT_WLOCK(object);
		object->cred = cred;
		object->charge = size;
		VM_OBJECT_WUNLOCK(object);
	}
	return (object);
}
static void
os_balloonobject_create(void)
{
	global_state.vmobject = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
}
kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_offset_t addr;
	unsigned int attempts;
	kern_return_t kr;

	/*
	 * Allocate a new object.  We must do this before locking
	 * the map, lest we risk deadlock with the default pager:
	 *	device_read_alloc uses kmem_alloc,
	 *	which tries to allocate an object,
	 *	which uses kmem_alloc_wired to get memory,
	 *	which blocks for pages.
	 *	then the default pager needs to read a block
	 *	to process a memory_object_data_write,
	 *	and device_read_alloc calls kmem_alloc
	 *	and deadlocks on the map lock.
	 */
	size = round_page(size);
	object = vm_object_allocate(size);

	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc in %p\n", map);
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = 0;

	/*
	 * Since we have not given out this address yet,
	 * it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 * Allocate wired-down memory in the kernel_object,
	 * for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 * Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}
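/*
 * Hypothetical caller sketch (not part of the source above): kmem_alloc()
 * hands back wired, unzeroed kernel memory in the requested map, and is
 * paired with kmem_free() on the same map and size.  The function name
 * and the eight-page size are illustrative only.
 */
void
example_use_kmem_alloc(void)
{
	vm_offset_t buf;
	kern_return_t kr;

	kr = kmem_alloc(kernel_map, &buf, 8 * PAGE_SIZE);
	if (kr != KERN_SUCCESS)
		return;

	/* ... use the eight wired pages starting at buf ... */

	kmem_free(kernel_map, buf, 8 * PAGE_SIZE);
}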
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	u_short color;

	if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
		return (NULL);

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);
	pindex = OFF_TO_IDX(foff + size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);
	mtx_lock(&dev_pager_mtx);

	/*
	 * Look up pager, creating as necessary.
	 */
	object1 = NULL;
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.  Initialize
		 * the object's pg_color based upon the physical address of the
		 * device's memory.
		 */
		mtx_unlock(&dev_pager_mtx);
		object1 = vm_object_allocate(tp, pindex);
		object1->flags |= OBJ_COLORED;
		object1->pg_color = color;
		object1->handle = handle;
		object1->un_pager.devp.ops = ops;
		object1->un_pager.devp.dev = handle;
		TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
		mtx_lock(&dev_pager_mtx);
		object = vm_pager_object_lookup(&dev_pager_object_list, handle);
		if (object != NULL) {
			/*
			 * We raced with other thread while allocating object.
			 */
			if (pindex > object->size)
				object->size = pindex;
		} else {
			object = object1;
			object1 = NULL;
			object->handle = handle;
			TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
			    pager_object_list);
			KASSERT(object->type == tp,
			    ("Inconsistent device pager type %p %d", object, tp));
		}
	} else {
		if (pindex > object->size)
			object->size = pindex;
	}
	mtx_unlock(&dev_pager_mtx);
	if (object1 != NULL) {
		object1->handle = object1;
		mtx_lock(&dev_pager_mtx);
		TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
		    pager_object_list);
		mtx_unlock(&dev_pager_mtx);
		vm_object_deallocate(object1);
	}
	return (object);
}
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS		status;
	vm_offset_t		oldphys;
	struct pmap		*pm;
	vm_page_t		page;
	static vm_page_t	opage = NULL;
	int			ret = 0;
	int			pteobj_allocated = 0;
	u_long			ef;
	struct proc		*p;

	if (sc->acpi_wakeaddr == 0) {
		return (0);
	}

	AcpiSetFirmwareWakingVector(sc->acpi_wakephys);

	ef = read_eflags();
	disable_intr();

	/* Create Identity Mapping */
	if ((p = curproc) == NULL)
		p = &proc0;
	pm = vmspace_pmap(p->p_vmspace);
	if (pm->pm_pteobj == NULL) {
		pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
		pteobj_allocated = 1;
	}

	oldphys = pmap_extract(pm, sc->acpi_wakephys);
	if (oldphys) {
		opage = PHYS_TO_VM_PAGE(oldphys);
	}
	page = PHYS_TO_VM_PAGE(sc->acpi_wakephys);
	pmap_enter(pm, sc->acpi_wakephys, page,
		   VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 1);

	ret_addr = 0;
	if (acpi_savecpu()) {
		/* Execute Sleep */
		p_gdt = (struct region_descriptor *)(sc->acpi_wakeaddr + physical_gdt);
		p_gdt->rd_limit = r_gdt.rd_limit;
		p_gdt->rd_base = vtophys(r_gdt.rd_base);

		WAKECODE_FIXUP(physical_esp, u_int32_t, vtophys(r_esp));
		WAKECODE_FIXUP(previous_cr0, u_int32_t, r_cr0);
		WAKECODE_FIXUP(previous_cr2, u_int32_t, r_cr2);
		WAKECODE_FIXUP(previous_cr3, u_int32_t, r_cr3);
		WAKECODE_FIXUP(previous_cr4, u_int32_t, r_cr4);

		WAKECODE_FIXUP(previous_tr, u_int16_t, r_tr);
		WAKECODE_BCOPY(previous_gdt, struct region_descriptor, r_gdt);
		WAKECODE_FIXUP(previous_ldt, u_int16_t, r_ldt);
		WAKECODE_BCOPY(previous_idt, struct region_descriptor, r_idt);

		WAKECODE_FIXUP(where_to_recover, void, acpi_restorecpu);

		WAKECODE_FIXUP(previous_ds, u_int16_t, r_ds);
		WAKECODE_FIXUP(previous_es, u_int16_t, r_es);
		WAKECODE_FIXUP(previous_fs, u_int16_t, r_fs);
		WAKECODE_FIXUP(previous_gs, u_int16_t, r_gs);
		WAKECODE_FIXUP(previous_ss, u_int16_t, r_ss);

		if (acpi_get_verbose(sc)) {
			acpi_printcpu();
		}

		wbinvd();

		if (state == ACPI_STATE_S4 && sc->acpi_s4bios) {
			status = AcpiEnterSleepStateS4Bios();
		} else {
			status = AcpiEnterSleepState(state);
		}

		if (status != AE_OK) {
			device_printf(sc->acpi_dev,
				"AcpiEnterSleepState failed - %s\n",
				AcpiFormatException(status));
			ret = -1;
			goto out;
		}

		for (;;) ;
	} else {
		/* Execute Wakeup */
#if 0
		initializecpu();
#endif
		icu_reinit();

		if (acpi_get_verbose(sc)) {
			acpi_savecpu();
			acpi_printcpu();
		}
	}

out:
	vm_page_lock_queues();
	pmap_remove(pm, sc->acpi_wakephys, sc->acpi_wakephys + PAGE_SIZE);
	vm_page_unlock_queues();
	if (opage) {
		pmap_enter(pm, sc->acpi_wakephys, page,
			   VM_PROT_READ | VM_PROT_WRITE, 0);
	}

	if (pteobj_allocated) {
		vm_object_deallocate(pm->pm_pteobj);
		pm->pm_pteobj = NULL;
	}

	write_eflags(ef);

	return (ret);
}
kern_return_t
projected_buffer_allocate(
	vm_map_t	map,
	vm_size_t	size,
	int		persistence,
	vm_offset_t	*kernel_p,
	vm_offset_t	*user_p,
	vm_prot_t	protection,
	vm_inherit_t	inheritance)  /*Currently only VM_INHERIT_NONE supported*/
{
	vm_object_t object;
	vm_map_entry_t u_entry, k_entry;
	vm_offset_t addr;
	vm_size_t r_size;
	kern_return_t kr;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate a new object.
	 */
	size = round_page(size);
	object = vm_object_allocate(size);

	vm_map_lock(kernel_map);
	kr = vm_map_find_entry(kernel_map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &k_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	k_entry->object.vm_object = object;
	if (!persistence)
		k_entry->projected_on = (vm_map_entry_t) -1;
		/*Mark entry so as to automatically deallocate it when
		  last corresponding user entry is deallocated*/
	vm_map_unlock(kernel_map);
	*kernel_p = addr;

	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &u_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);
		vm_map_lock(kernel_map);
		vm_map_entry_delete(kernel_map, k_entry);
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	u_entry->object.vm_object = object;
	vm_object_reference(object);
	u_entry->projected_on = k_entry;
	/*Creates coupling with kernel mapping of the buffer, and
	  also guarantees that user cannot directly manipulate
	  buffer VM entry*/
	u_entry->protection = protection;
	u_entry->max_protection = protection;
	u_entry->inheritance = inheritance;
	vm_map_unlock(map);
	*user_p = addr;

	/*
	 *	Allocate wired-down memory in the object,
	 *	and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 *kernel_p, *kernel_p + size,
			 VM_PROT_READ | VM_PROT_WRITE);
	memset((void*) *kernel_p, 0, size);	/*Zero fill*/

	/* Set up physical mappings for user pmap */
	pmap_pageable(map->pmap, *user_p, *user_p + size, FALSE);
	for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
		addr = pmap_extract(kernel_pmap, *kernel_p + r_size);
		pmap_enter(map->pmap, *user_p + r_size, addr,
			   protection, TRUE);
	}

	return(KERN_SUCCESS);
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v, firstaddr;
	int base, residual;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;
#ifdef BUFFERS_UNMANAGED
	vm_offset_t bufmemp;
	caddr_t buffermem;
	int ix;
#endif
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Since HPs tend to be long on memory and short on disk speed,
	 * we allocate more buffer space than the BSD standard of
	 * use 10% of memory for the first 2 Meg, 5% of remaining.
	 * We just allocate a flat 10%.  Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = physmem / 10 / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
#ifdef BUFFERS_UNMANAGED
		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
		if (buffermem == 0)
			panic("startup: no room for buffers");
#endif
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
				   &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
#ifdef BUFFERS_UNMANAGED
	bufmemp = (vm_offset_t) buffermem;
#endif
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
#ifdef BUFFERS_UNMANAGED
		/*
		 * Move the physical pages over from buffermem.
		 */
		for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
			vm_offset_t pa;

			pa = pmap_extract(kernel_pmap, bufmemp);
			if (pa == 0)
				panic("startup: unmapped buffer");
			pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
			pmap_enter(kernel_pmap,
				   (vm_offset_t)(curbuf + ix * CLBYTES),
				   pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
			bufmemp += CLBYTES;
		}
#else
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
#endif
	}
#ifdef BUFFERS_UNMANAGED
#if 0
	/*
	 * We would like to free the (now empty) original address range
	 * but too many bad things will happen if we try.
	 */
	kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
#endif
#endif
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, TRUE);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}
/*
 * Machine-dependent startup code
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v;
	register int sz;
	int base, residual;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	physmem = btoc(avail_end);
	printf("real mem = %d\n", avail_end);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
				   &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, TRUE);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	phys_map = vm_map_create(kernel_pmap, DVMA_BASE, DVMA_END, 1);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system, then turn on the cache.  Should be able
	 * to do this earlier, but then esp.c fails on SS1+ boxes (??).
	 */
	configure();
	cache_enable();
}
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	pg_offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr, fill_start;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, fill_size;
	kern_return_t		kr;
	vm_page_t		mem;
	vm_page_t		guard_page_list = NULL;
	vm_page_t		wired_page_list = NULL;
	int			guard_page_count = 0;
	int			wired_page_count = 0;
	int			i;
	int			vm_alloc_flags;

	if (! vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;
	vm_alloc_flags = 0;

	/*
	 * limit the size of a single extent of wired memory
	 * to try and limit the damage to the system if
	 * too many pages get wired down
	 */
	if (map_size > (1 << 30)) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Guard pages:
	 *
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */

	fill_start = 0;
	fill_size = map_size;

	if (flags & KMA_GUARD_FIRST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;
		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	if (flags & KMA_GUARD_LAST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
		fill_size -= PAGE_SIZE_64;
		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	wired_page_count = (int) (fill_size / PAGE_SIZE_64);
	assert(wired_page_count * PAGE_SIZE_64 == fill_size);

	for (i = 0; i < guard_page_count; i++) {
		for (;;) {
			mem = vm_page_grab_guard();

			if (mem != VM_PAGE_NULL)
				break;
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_page_more_fictitious();
		}
		mem->pageq.next = (queue_entry_t)guard_page_list;
		guard_page_list = mem;
	}

	for (i = 0; i < wired_page_count; i++) {
		uint64_t	unavailable;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_grablo();
			else
				mem = vm_page_grab();

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;

			if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			VM_PAGE_WAIT();
		}
		mem->pageq.next = (queue_entry_t)wired_page_list;
		wired_page_list = mem;
	}

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr,
			       fill_size, map_mask,
			       vm_alloc_flags, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		goto out;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr : 0;

	entry->wired_count++;

	if (flags & KMA_PERMANENT)
		entry->permanent = TRUE;

	if (object != kernel_object)
		vm_object_reference(object);

	vm_object_lock(object);
	vm_map_unlock(map);

	pg_offset = 0;

	if (fill_start) {
		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		pg_offset += PAGE_SIZE_64;
	}
	for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
		if (wired_page_list == NULL)
			panic("kernel_memory_allocate: wired_page_list == NULL");

		mem = wired_page_list;
		wired_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;
		mem->wire_count++;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		mem->pmapped = TRUE;
		mem->wpmapped = TRUE;

		PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
			   VM_PROT_READ | VM_PROT_WRITE,
			   object->wimg_bits & VM_WIMG_MASK, TRUE);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

			pmap_set_noencrypt(mem->phys_page);
		}
	}
void
cpu_startup()
{
	struct pdc_model pdc_model;
	register const struct hppa_board_info *bip;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;
	int base, residual;
	int err, i;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/* good night */
	printf(version);

	/* identify system type */
	if ((err = pdc_call((iodcio_t)pdc, 0, PDC_MODEL, PDC_MODEL_INFO,
			    &pdc_model)) < 0) {
#ifdef DEBUG
		printf("WARNING: PDC_MODEL failed (%d)\n", err);
#endif
	} else {
		i = pdc_model.hvers >> 4;	/* board type */
		for (bip = hppa_knownboards;
		     bip->bi_id >= 0 && bip->bi_id != i; bip++);
		if (bip->bi_id >= 0) {
			char *p;
			switch(pdc_model.arch_rev) {
			case 0:  p = "1.0"; break;
			case 4:  p = "1.1"; break;
			case 8:  p = "2.0"; break;
			default: p = "?.?"; break;
			}
			/* my babe said: 6010, 481, 0, 0, 77b657b1, 0, 4 */
			sprintf(cpu_model, "HP9000/%s PA-RISC %s",
				bip->bi_name, p);
		} else
			sprintf(cpu_model, "HP9000/(UNKNOWN %x)", i);
		printf("%s\n", cpu_model);
	}

	printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n",
	       ctob(totalphysmem), ctob(resvmem), ctob(physmem));

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
				   &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("cpu_startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		vm_map_pageable(buffer_map, minaddr,
				minaddr + CLBYTES * (base + (i < residual)),
				FALSE);
		vm_map_simplify(buffer_map, minaddr);
		minaddr += MAXBSIZE;
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, TRUE);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %ld\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
	       nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
	hppa_malloc_ok = 1;
	configure();
}
/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create new shadow object backing dst_entry with private copy of
 *		all underlying pages. When src_entry is equal to dst_entry,
 *		function implements COW for wired-down map entry. Otherwise,
 *		it forks wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t src_readonly, upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);
	src_readonly = (src_entry->protection & VM_PROT_WRITE) == 0;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
	dst_object->flags |= OBJ_COLORED;
	dst_object->pg_color = atop(dst_entry->start);
#endif

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;
	dst_object->charge = dst_entry->end - dst_entry->start;
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else {
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}
	access = prot = dst_entry->protection;

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified. Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {

		/*
		 * Allocate a page in the destination object.
		 */
		do {
			dst_m = vm_page_alloc(dst_object, dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_WAIT;
				VM_OBJECT_WLOCK(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		VM_OBJECT_WLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    src_readonly &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_WLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_WUNLOCK(object);
			object = backing_object;
		}
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");
		pmap_copy_page(src_m, dst_m);
		VM_OBJECT_WUNLOCK(object);
		dst_m->valid = VM_PAGE_BITS_ALL;
		dst_m->dirty = VM_PAGE_BITS_ALL;
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 */
		pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);
		if (upgrade) {
			vm_page_lock(src_m);
			vm_page_unwire(src_m, 0);
			vm_page_unlock(src_m);

			vm_page_lock(dst_m);
			vm_page_wire(dst_m);
			vm_page_unlock(dst_m);
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_wakeup(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	ppnum_t		max_pnum,
	ppnum_t		pnum_mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum,
			  pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);

	return KERN_SUCCESS;
}