void
amap_wipeout(struct vm_amap *amap)
{
	int lcv, slot;
	struct vm_anon *anon;

	KASSERT(amap->am_ref == 0);

	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
		/* amap_swap_off will call us again. */
		return;
	}
	amap_list_remove(amap);

	for (lcv = 0; lcv < amap->am_nused; lcv++) {
		int refs;

		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];

		if (anon == NULL || anon->an_ref == 0)
			panic("amap_wipeout: corrupt amap");

		refs = --anon->an_ref;
		if (refs == 0) {
			/* we had the last reference to a vm_anon. free it. */
			uvm_anfree(anon);
		}
	}

	/* now we free the map */
	amap->am_ref = 0;	/* ... was one */
	amap->am_nused = 0;
	amap_free(amap);	/* will free amap */
}
void
amap_wipeout(struct vm_amap *amap)
{
	int lcv, slot;
	struct vm_anon *anon;
	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(amap=0x%x)", amap, 0, 0, 0);

	KASSERT(amap->am_ref == 0);

	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
		/*
		 * amap_swap_off will call us again.
		 */
		amap_unlock(amap);
		return;
	}
	amap_list_remove(amap);
	amap_unlock(amap);

	for (lcv = 0; lcv < amap->am_nused; lcv++) {
		int refs;

		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];

		if (anon == NULL || anon->an_ref == 0)
			panic("amap_wipeout: corrupt amap");

		mutex_enter(&anon->an_lock);
		UVMHIST_LOG(maphist, "  processing anon 0x%x, ref=%d", anon,
		    anon->an_ref, 0, 0);
		refs = --anon->an_ref;
		mutex_exit(&anon->an_lock);
		if (refs == 0) {
			/*
			 * we had the last reference to a vm_anon. free it.
			 */
			uvm_anfree(anon);
		}

		if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();
	}

	/*
	 * now we free the map
	 */

	amap->am_nused = 0;
	amap_free(amap);	/* will unlock and free amap */
	UVMHIST_LOG(maphist, "<- done!", 0, 0, 0, 0);
}
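/*
 * Illustrative sketch, not part of the original listing: how
 * amap_wipeout() is typically reached.  A caller dropping the final
 * reference (amap_unref() in the real source) zeroes am_ref and calls
 * amap_wipeout() with the amap locked, matching the KASSERT and the
 * unlock behavior above.  This is a simplified assumption of that
 * flow, not the verbatim amap_unref() implementation.
 */
static void
example_amap_unref(struct vm_amap *amap)
{
	amap_lock(amap);
	if (amap->am_ref == 1) {
		amap->am_ref = 0;	/* final reference dropped */
		amap_wipeout(amap);	/* will unlock and free amap */
		return;
	}
	amap->am_ref--;
	amap_unlock(amap);
}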
void
amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
    vaddr_t startva, vaddr_t endva)
{
	struct vm_amap *amap, *srcamap;
	int slots, lcv;
	vaddr_t chunksize;
	const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
	const bool canchunk = (flags & AMAP_COPY_NOCHUNK) == 0;
	UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "  (map=%p, entry=%p, flags=%d)",
	    map, entry, flags, 0);

	KASSERT(map != kernel_map);	/* we use nointr pool */

	/*
	 * is there a map to copy?  if not, create one from scratch.
	 */

	if (entry->aref.ar_amap == NULL) {
		/*
		 * check to see if we have a large amap that we can
		 * chunk.  we align startva/endva to chunk-sized
		 * boundaries and then clip to them.
		 */

		if (canchunk && atop(entry->end - entry->start) >=
		    UVM_AMAP_LARGE) {
			/* convert slots to bytes */
			chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
			startva = (startva / chunksize) * chunksize;
			endva = roundup(endva, chunksize);
			UVMHIST_LOG(maphist, "  chunk amap ==> clip 0x%x->0x%x"
			    " to 0x%x->0x%x", entry->start, entry->end,
			    startva, endva);
			UVM_MAP_CLIP_START(map, entry, startva, NULL);
			/* watch out for endva wrap-around! */
			if (endva >= startva)
				UVM_MAP_CLIP_END(map, entry, endva, NULL);
		}

		if ((flags & AMAP_COPY_NOMERGE) == 0 &&
		    uvm_mapent_trymerge(map, entry, UVM_MERGE_COPYING)) {
			return;
		}

		UVMHIST_LOG(maphist, "<- done [creating new amap 0x%x->0x%x]",
		    entry->start, entry->end, 0, 0);
		entry->aref.ar_pageoff = 0;
		entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0,
		    waitf);
		if (entry->aref.ar_amap != NULL)
			entry->etype &= ~UVM_ET_NEEDSCOPY;
		return;
	}

	/*
	 * first check and see if we are the only map entry
	 * referencing the amap we currently have.  if so, then we can
	 * just take it over rather than copying it.  note that we are
	 * reading am_ref with the amap unlocked... the value can only
	 * be one if we have the only reference to the amap (via our
	 * locked map).  if we are greater than one we fall through to
	 * the next case (where we double check the value).
	 */

	if (entry->aref.ar_amap->am_ref == 1) {
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		UVMHIST_LOG(maphist, "<- done [ref cnt = 1, took it over]",
		    0, 0, 0, 0);
		return;
	}

	/*
	 * looks like we need to copy the map.
	 */

	UVMHIST_LOG(maphist, "  amap=%p, ref=%d, must copy it",
	    entry->aref.ar_amap, entry->aref.ar_amap->am_ref, 0, 0);
	AMAP_B2SLOT(slots, entry->end - entry->start);
	amap = amap_alloc1(slots, 0, waitf);
	if (amap == NULL) {
		UVMHIST_LOG(maphist, "  amap_alloc1 failed", 0, 0, 0, 0);
		return;
	}
	srcamap = entry->aref.ar_amap;
	amap_lock(srcamap);

	/*
	 * need to double check reference count now that we've got the
	 * src amap locked down.  the reference count could have
	 * changed while we were in malloc.  if the reference count
	 * dropped down to one we take over the old map rather than
	 * copying the amap.
	 */

	if (srcamap->am_ref == 1) {	/* take it over? */
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		amap->am_ref--;		/* drop final reference to map */
		amap_free(amap);	/* dispose of new (unused) amap */
		amap_unlock(srcamap);
		return;
	}

	/*
	 * we must copy it now.
	 */

	UVMHIST_LOG(maphist, "  copying amap now", 0, 0, 0, 0);
	for (lcv = 0; lcv < slots; lcv++) {
		amap->am_anon[lcv] =
		    srcamap->am_anon[entry->aref.ar_pageoff + lcv];
		if (amap->am_anon[lcv] == NULL)
			continue;
		mutex_enter(&amap->am_anon[lcv]->an_lock);
		amap->am_anon[lcv]->an_ref++;
		mutex_exit(&amap->am_anon[lcv]->an_lock);
		amap->am_bckptr[lcv] = amap->am_nused;
		amap->am_slots[amap->am_nused] = lcv;
		amap->am_nused++;
	}
	memset(&amap->am_anon[lcv], 0,
	    (amap->am_maxslot - lcv) * sizeof(struct vm_anon *));

	/*
	 * drop our reference to the old amap (srcamap) and unlock.
	 * we know that the reference count on srcamap is greater than
	 * one (we checked above), so there is no way we could drop
	 * the count to zero.  [and no need to worry about freeing it]
	 */

	srcamap->am_ref--;
	if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
		srcamap->am_flags &= ~AMAP_SHARED;	/* clear shared flag */
#ifdef UVM_AMAP_PPREF
	if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
		amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
		    (entry->end - entry->start) >> PAGE_SHIFT, -1);
	}
#endif
	amap_unlock(srcamap);

	amap_list_insert(amap);

	/*
	 * install new amap.
	 */

	entry->aref.ar_pageoff = 0;
	entry->aref.ar_amap = amap;
	entry->etype &= ~UVM_ET_NEEDSCOPY;
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
}
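/*
 * Illustrative sketch, not part of the original listing: the chunk
 * alignment arithmetic used in the "create from scratch" path above,
 * assuming UVM_AMAP_CHUNK == 16 slots and PAGE_SHIFT == 12, so
 * chunksize == 16 << 12 == 0x10000 (64KB).  startva is rounded down to
 * a chunk boundary and endva is rounded up, so the clipped region
 * always covers whole chunks.
 */
#include <assert.h>

static void
example_chunk_align(void)
{
	unsigned long chunksize = 16UL << 12;	/* assumed values, see above */
	unsigned long startva = 0x12345, endva = 0x23456;

	startva = (startva / chunksize) * chunksize;	/* round down */
	/* equivalent of roundup(endva, chunksize): round up */
	endva = ((endva + chunksize - 1) / chunksize) * chunksize;
	assert(startva == 0x10000 && endva == 0x30000);
}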
void
amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
    boolean_t canchunk, vaddr_t startva, vaddr_t endva)
{
	struct vm_amap *amap, *srcamap;
	int slots, lcv;
	vaddr_t chunksize;

	/* is there a map to copy?  if not, create one from scratch. */

	if (entry->aref.ar_amap == NULL) {
		/*
		 * check to see if we have a large amap that we can
		 * chunk.  we align startva/endva to chunk-sized
		 * boundaries and then clip to them.
		 */

		if (canchunk && atop(entry->end - entry->start) >=
		    UVM_AMAP_LARGE) {
			/* convert slots to bytes */
			chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
			startva = (startva / chunksize) * chunksize;
			endva = roundup(endva, chunksize);
			UVM_MAP_CLIP_START(map, entry, startva);
			/* watch out for endva wrap-around! */
			if (endva >= startva)
				UVM_MAP_CLIP_END(map, entry, endva);
		}

		entry->aref.ar_pageoff = 0;
		entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0,
		    waitf);
		if (entry->aref.ar_amap != NULL)
			entry->etype &= ~UVM_ET_NEEDSCOPY;
		return;
	}

	/*
	 * first check and see if we are the only map entry referencing
	 * the amap we currently have.  if so, then we can just take it
	 * over rather than copying it.  the value can only be one if
	 * we have the only reference to the amap.
	 */

	if (entry->aref.ar_amap->am_ref == 1) {
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		return;
	}

	/* looks like we need to copy the map. */

	AMAP_B2SLOT(slots, entry->end - entry->start);
	amap = amap_alloc1(slots, 0, waitf);
	if (amap == NULL)
		return;
	srcamap = entry->aref.ar_amap;

	/*
	 * need to double check the reference count now.  the reference
	 * count could have changed while we were in malloc.  if the
	 * reference count dropped down to one we take over the old map
	 * rather than copying the amap.
	 */

	if (srcamap->am_ref == 1) {	/* take it over? */
		entry->etype &= ~UVM_ET_NEEDSCOPY;
		amap->am_ref--;		/* drop final reference to map */
		amap_free(amap);	/* dispose of new (unused) amap */
		return;
	}

	/* we must copy it now. */

	for (lcv = 0; lcv < slots; lcv++) {
		amap->am_anon[lcv] =
		    srcamap->am_anon[entry->aref.ar_pageoff + lcv];
		if (amap->am_anon[lcv] == NULL)
			continue;
		amap->am_anon[lcv]->an_ref++;
		amap->am_bckptr[lcv] = amap->am_nused;
		amap->am_slots[amap->am_nused] = lcv;
		amap->am_nused++;
	}
	memset(&amap->am_anon[lcv], 0,
	    (amap->am_maxslot - lcv) * sizeof(struct vm_anon *));

	/*
	 * drop our reference to the old amap (srcamap).  we know that
	 * the reference count on srcamap is greater than one (we
	 * checked above), so there is no way we could drop the count
	 * to zero.  [and no need to worry about freeing it]
	 */

	srcamap->am_ref--;
	if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
		srcamap->am_flags &= ~AMAP_SHARED;	/* clear shared flag */
#ifdef UVM_AMAP_PPREF
	if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
		amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
		    (entry->end - entry->start) >> PAGE_SHIFT, -1);
	}
#endif

	/*
	 * install new amap.
	 */

	entry->aref.ar_pageoff = 0;
	entry->aref.ar_amap = amap;
	entry->etype &= ~UVM_ET_NEEDSCOPY;
}
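/*
 * Illustrative sketch, not part of the original listing: the invariant
 * the copy loops above maintain between am_slots[], am_bckptr[] and
 * am_nused.  am_slots[0..am_nused-1] is a dense list of the in-use slot
 * numbers, and am_bckptr[slot] records where that slot appears in
 * am_slots[]; this is what lets amap_wipeout() walk only the used slots.
 * The ex_* names and the fixed array size are made up for the example.
 */
#include <assert.h>

struct ex_amap {
	int	am_nused;
	int	am_slots[8];	/* dense list of in-use slot numbers */
	int	am_bckptr[8];	/* am_bckptr[slot] = index into am_slots[] */
};

static void
ex_use_slot(struct ex_amap *a, int slot)
{
	/* the same three bookkeeping steps as the copy loop */
	a->am_bckptr[slot] = a->am_nused;
	a->am_slots[a->am_nused] = slot;
	a->am_nused++;
}

static void
ex_check_invariant(const struct ex_amap *a)
{
	for (int i = 0; i < a->am_nused; i++)
		assert(a->am_bckptr[a->am_slots[i]] == i);
}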