static int vm_region_expand( struct vm_region *vmr, unsigned npages ) { unsigned new_pages; int res; unsigned i; unsigned old_pages; old_pages = vm_page_array_num( vmr->vmr_pages ); new_pages = npages - old_pages; //trivial case, nothing to do. if( new_pages == 0 ) return 0; //see if we can back this loan with storage. res = swap_reserve( new_pages ); if( res ) return res; //attempt to rezize the vmr_pages array. res = vm_page_array_setsize( vmr->vmr_pages, npages ); if( res ) { swap_unreserve( new_pages ); return res; } //initialize each of the newly created vm_pages to NULL. for( i = old_pages; i < npages; ++i ) vm_page_array_set( vmr->vmr_pages, i, NULL ); return 0; }
/*
 * vm_object_setsize: change the size of a vm_object.
 *
 * Shrinking: every lpage past the new end is torn down — its TLB/MMU
 * mapping is removed before the page is destroyed; slots that were
 * still NULL only ever held a swap reservation, which is returned.
 * Growing: swap for the new pages is reserved before the array is
 * resized, so a failed resize can cleanly return the reservation; the
 * new slots start out NULL (no lpage materialized yet).
 *
 * Returns 0 on success or an error code; on error the object is
 * unchanged.
 */
int
vm_object_setsize(struct addrspace *as, struct vm_object *vmo, unsigned npages)
{
	int result;
	unsigned i;
	struct lpage *lp;

	KASSERT(vmo != NULL);
	KASSERT(vmo->vmo_lpages != NULL);

	if (npages < lpage_array_num(vmo->vmo_lpages)) {
		/* shrinking: release everything past the new end */
		for (i=npages; i<lpage_array_num(vmo->vmo_lpages); i++) {
			lp = lpage_array_get(vmo->vmo_lpages, i);
			if (lp != NULL) {
				/* need the address space to drop the mapping */
				KASSERT(as != NULL);
				/* remove any tlb entry for this mapping */
				mmu_unmap(as, vmo->vmo_base+PAGE_SIZE*i);
				lpage_destroy(lp);
			}
			else {
				/* no lpage ever materialized here; just hand
				   back the swap reservation it was holding */
				swap_unreserve(1);
			}
		}
		result = lpage_array_setsize(vmo->vmo_lpages, npages);
		/* shrinking an array shouldn't fail */
		KASSERT(result==0);
	}
	else if (npages > lpage_array_num(vmo->vmo_lpages)) {
		/* growing: reserve swap for the new pages before resizing */
		int oldsize = lpage_array_num(vmo->vmo_lpages);
		unsigned newpages = npages - oldsize;
		result = swap_reserve(newpages);
		if (result) {
			return result;
		}
		result = lpage_array_setsize(vmo->vmo_lpages, npages);
		if (result) {
			/* resize failed: return the reservation */
			swap_unreserve(newpages);
			return result;
		}
		/* NULL marks a page not yet materialized */
		for (i=oldsize; i<npages; i++) {
			lpage_array_set(vmo->vmo_lpages, i, NULL);
		}
	}
	/* npages == current size: nothing to do */
	return 0;
}
/*
 * vm_region_create: allocate a new vm_region covering npages pages.
 *
 * Reserves swap for every page up front, then allocates the region
 * and its page array.  All page slots start out NULL (no vm_page
 * materialized yet) and vmr_base is set to a poison value the caller
 * must overwrite before use.
 *
 * Returns the new region, or NULL on failure; on failure the swap
 * reservation has been released.
 */
struct vm_region *
vm_region_create( size_t npages )
{
	int			res;
	struct vm_region	*vmr;
	unsigned		i;
	int			err;

	//see if we can reserve npages of swap first.
	err = swap_reserve( npages );
	if( err )
		return NULL;

	//attempt to create the vm_region
	vmr = kmalloc( sizeof( struct vm_region ) );
	if( vmr == NULL ) {
		//BUGFIX: must release the reservation here; the original
		//returned NULL without swap_unreserve and leaked npages
		//of reserved swap (cf. vm_object_create, which unreserves
		//on this same path).
		swap_unreserve( npages );
		return NULL;
	}

	//create the vm_pages.
	vmr->vmr_pages = vm_page_array_create();
	if( vmr->vmr_pages == NULL ) {
		kfree( vmr );
		swap_unreserve( npages );
		return NULL;
	}

	//set the base address to point to an invalid virtual address.
	vmr->vmr_base = 0xdeadbeef;

	//adjust the array to hold npages.
	res = vm_page_array_setsize( vmr->vmr_pages, npages );
	if( res ) {
		vm_page_array_destroy( vmr->vmr_pages );
		kfree( vmr );
		swap_unreserve( npages );
		return NULL;
	}

	//initialize all the pages to NULL.
	for( i = 0; i < npages; ++i )
		vm_page_array_set( vmr->vmr_pages, i, NULL );

	return vmr;
}
/*
 * vm_object_create: Allocate a new vm_object with nothing in it.
 * Returns: new vm_object on success, NULL on error.
 */
struct vm_object *
vm_object_create(size_t npages)
{
	struct vm_object *vmo;
	unsigned i;

	/* back every page with swap before committing to anything */
	if (swap_reserve(npages) != 0) {
		return NULL;
	}

	vmo = kmalloc(sizeof(struct vm_object));
	if (vmo == NULL) {
		goto fail_unreserve;
	}

	vmo->vmo_lpages = lpage_array_create();
	if (vmo->vmo_lpages == NULL) {
		goto fail_free;
	}

	vmo->vmo_base = 0xdeafbeef;		/* make sure these */
	vmo->vmo_lower_redzone = 0xdeafbeef;	/* get filled in later */

	/* add the requested number of zerofilled pages */
	if (lpage_array_setsize(vmo->vmo_lpages, npages) != 0) {
		lpage_array_destroy(vmo->vmo_lpages);
		goto fail_free;
	}

	for (i = 0; i < npages; i++) {
		lpage_array_set(vmo->vmo_lpages, i, NULL);
	}

	return vmo;

	/* unwind in reverse order of acquisition */
fail_free:
	kfree(vmo);
fail_unreserve:
	swap_unreserve(npages);
	return NULL;
}
static int vm_region_shrink( struct vm_region *vmr, unsigned npages ) { unsigned i; struct vm_page *vmp; for( i = npages; i < vm_page_array_num( vmr->vmr_pages ); ++i ) { vmp = vm_page_array_get( vmr->vmr_pages, i ); if( vmp == NULL ) { swap_unreserve( 1 ); continue; } //unmap tlb entries. vm_unmap( vmr->vmr_base + PAGE_SIZE * i ); //destroy the page. vm_page_destroy( vmp ); } return vm_page_array_setsize( vmr->vmr_pages, npages ); }