/*!	Called by UnmapPage() after performing the architecture specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page, pageNumber,
			accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}
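// A minimal, self-contained sketch of the ordering PageUnmapped() follows
// above: the mapping is unlinked from its lists while the translation map
// lock is held, but the memory itself is only freed after the lock has been
// dropped, so the allocator is never entered while the lock is held. The
// types below are illustrative stand-ins, not the kernel's vm_page_mapping
// and object cache.
#include <mutex>
#include <vector>

struct MappingSketch {
	int area;
};

static std::mutex sMapLock;
static std::vector<MappingSketch*> sMappings;

static void
unmap_one(int area)
{
	MappingSketch* unlinked = nullptr;

	{
		std::lock_guard<std::mutex> locker(sMapLock);
		for (size_t i = 0; i < sMappings.size(); i++) {
			if (sMappings[i]->area == area) {
				unlinked = sMappings[i];
				sMappings.erase(sMappings.begin() + i);
				break;
			}
		}
	}

	// free only after the lock has been dropped
	if (unlinked != nullptr)
		delete unlinked;
}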
/*!	Calls low resource handlers for the given resources.
	sLowResourceLock must be held.
*/
static void
call_handlers(uint32 lowResources)
{
	if (sLowResourceHandlers.IsEmpty())
		return;

	// Add a marker, so we can drop the lock while calling the handlers and
	// still iterate safely.
	low_resource_handler marker;
	sLowResourceHandlers.Insert(&marker, false);

	while (low_resource_handler* handler
			= sLowResourceHandlers.GetNext(&marker)) {
		// swap with handler
		sLowResourceHandlers.Swap(&marker, handler);
		marker.priority = handler->priority;

		int32 resources = handler->resources & lowResources;
		if (resources != 0) {
			recursive_lock_unlock(&sLowResourceLock);
			handler->function(handler->data, resources,
				low_resource_state_no_update(resources));
			recursive_lock_lock(&sLowResourceLock);
		}
	}

	// remove marker
	sLowResourceHandlers.Remove(&marker);
}
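// A minimal, self-contained sketch of the marker technique used by
// call_handlers() above: a dummy "marker" node is inserted into the list, and
// the marker is moved past each handler before that handler is called.
// Because the marker stays linked into the list while the lock is dropped,
// concurrent insertions and removals of real handlers cannot invalidate the
// iteration position. The list, lock, and handler types below are
// illustrative stand-ins, not Haiku's DoublyLinkedList/recursive_lock.
#include <mutex>

struct Handler {
	Handler* prev = nullptr;
	Handler* next = nullptr;
	void (*function)(void* data) = nullptr;	// null for marker nodes
	void* data = nullptr;
};

static std::mutex sListLock;
static Handler* sHead = nullptr;

static void
insert_after(Handler* position, Handler* node)
{
	// position == nullptr means insert at the head
	node->prev = position;
	node->next = position != nullptr ? position->next : sHead;
	if (node->next != nullptr)
		node->next->prev = node;
	if (position != nullptr)
		position->next = node;
	else
		sHead = node;
}

static void
remove(Handler* node)
{
	if (node->prev != nullptr)
		node->prev->next = node->next;
	else
		sHead = node->next;
	if (node->next != nullptr)
		node->next->prev = node->prev;
}

static void
call_all_handlers()
{
	std::unique_lock<std::mutex> locker(sListLock);

	Handler marker;
	insert_after(nullptr, &marker);

	while (Handler* handler = marker.next) {
		// move the marker past the handler we are about to call
		remove(&marker);
		insert_after(handler, &marker);

		locker.unlock();
		if (handler->function != nullptr)
			handler->function(handler->data);
			// the marker keeps our position valid while unlocked
		locker.lock();
	}

	remove(&marker);
}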
static int
unlock_tmap(vm_translation_map *map)
{
	TMAP_TRACE("unlock_tmap: map %p\n", map);

	if (recursive_lock_get_recursion(&map->lock) == 1) {
		// we're about to release it for the last time -- flush all pending
		// changes first (cf. M68KVMTranslationMap::Unlock())
		flush_tmap(map);
	}

	recursive_lock_unlock(&map->lock);
	return 0;
}
/*!	Called by ClearAccessedAndModified() after performing the architecture
	specific part. Looks up the page and removes the page-area mapping.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}
/*!	Unlocks the map and, if we are actually losing the recursive lock,
	flushes all pending changes of this map (i.e. flushes TLB caches as
	needed).
*/
void
M68KVMTranslationMap::Unlock()
{
	TRACE("%p->M68KVMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}
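// A minimal, self-contained sketch of the "flush on final release" pattern
// used by Unlock() above: a recursion count is kept next to the lock, and
// pending work is flushed only when the outermost Unlock() is about to
// release the lock for the last time, while it is still held. The
// RecursiveLock type here is an illustrative stand-in built on
// std::recursive_mutex, not the kernel's recursive_lock.
#include <mutex>

struct RecursiveLock {
	std::recursive_mutex mutex;
	int recursion = 0;		// only touched while the mutex is held
};

struct TranslationMapSketch {
	RecursiveLock lock;
	bool hasPendingChanges = false;

	void Lock()
	{
		lock.mutex.lock();
		lock.recursion++;
	}

	void Unlock()
	{
		if (lock.recursion == 1) {
			// about to release it for the last time -- flush now, while we
			// still hold the lock
			Flush();
		}
		lock.recursion--;
		lock.mutex.unlock();
	}

	void Flush()
	{
		// stand-in for flushing TLB entries/queued invalidations
		hasPendingChanges = false;
	}
};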
void
Attribute::Put()
{
	if (fBodyEntry != NULL) {
		recursive_lock_unlock(&fInode->SmallDataLock());
		fBlock.Unset();
		fBodyEntry = NULL;
	}

	if (fBlockEntry != NULL) {
		fBlock.Unset();
		fBlockEntry = NULL;
	}
}
int32
low_resource_state(uint32 resources)
{
	recursive_lock_lock(&sLowResourceLock);

	// recompute the state at most every 0.5 s (system_time() is in
	// microseconds)
	if (system_time() - sLastMeasurement > 500000)
		compute_state();

	int32 state = low_resource_state_no_update(resources);

	recursive_lock_unlock(&sLowResourceLock);

	return state;
}
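// A minimal, self-contained sketch of the throttling done by
// low_resource_state() above: the expensive recomputation runs at most once
// per interval (500000 us, i.e. 0.5 s, in the kernel code), and all queries
// in between reuse the cached state. The clock, lock, and state below are
// illustrative stand-ins for system_time(), sLowResourceLock, and
// compute_state().
#include <chrono>
#include <mutex>

static std::mutex sStateLock;
static std::chrono::steady_clock::time_point sLastMeasurementSketch;
static int sCachedState = 0;

static int
expensive_compute_state()
{
	return 1;	// placeholder for the real measurement
}

static int
current_state()
{
	std::lock_guard<std::mutex> locker(sStateLock);

	auto now = std::chrono::steady_clock::now();
	if (now - sLastMeasurementSketch > std::chrono::milliseconds(500)) {
		sCachedState = expensive_compute_state();
		sLastMeasurementSketch = now;
	}

	return sCachedState;
}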
status_t
Attribute::_Find(const char* name, int32 index)
{
	Put();

	fName = name;

	// try to find it in the small data region
	if (fInode->HasExtraAttributes()
			&& recursive_lock_lock(&fInode->SmallDataLock()) == B_OK) {
		off_t blockNum;
		fVolume->GetInodeBlock(fInode->ID(), blockNum);

		if (blockNum != 0) {
			fBlock.SetTo(blockNum);
			const uint8* start = fBlock.Block()
				+ fVolume->InodeBlockIndex(fInode->ID()) * fVolume->InodeSize();
			const uint8* end = start + fVolume->InodeSize();
			int32 count = 0;
			if (_FindAttributeBody(start + EXT2_INODE_NORMAL_SIZE
					+ fInode->Node().ExtraInodeSize(), end, name, index,
					&count, &fBodyEntry) == B_OK)
				return B_OK;
			index -= count;
		}

		recursive_lock_unlock(&fInode->SmallDataLock());
		fBlock.Unset();
	}

	// then, search in the attribute directory
	if (fInode->Node().ExtendedAttributesBlock() != 0) {
		fBlock.SetTo(fInode->Node().ExtendedAttributesBlock());

		if (_FindAttributeBlock(fBlock.Block(),
				fBlock.Block() + fVolume->BlockSize(), name, index, NULL,
				&fBlockEntry) == B_OK)
			return B_OK;

		fBlock.Unset();
	}

	return B_ENTRY_NOT_FOUND;
}
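// A minimal, self-contained sketch of how _Find() above carries one global
// index across two storage regions: if the entry is not found in the first
// region, the number of entries that region did contain is subtracted from
// the index before the second region is searched. The containers and entry
// type here are illustrative stand-ins for the in-inode ("body") attributes
// and the external attribute block.
#include <string>
#include <vector>

struct EntrySketch {
	std::string name;
};

static const EntrySketch*
find_in_region(const std::vector<EntrySketch>& region, const std::string& name,
	int index, int* _count)
{
	*_count = (int)region.size();
	int seen = 0;
	for (const EntrySketch& entry : region) {
		// an empty name means "look up by index"
		if ((name.empty() && seen == index) || entry.name == name)
			return &entry;
		seen++;
	}
	return nullptr;
}

static const EntrySketch*
find_attribute(const std::vector<EntrySketch>& body,
	const std::vector<EntrySketch>& block, const std::string& name, int index)
{
	int count = 0;
	if (const EntrySketch* entry = find_in_region(body, name, index, &count))
		return entry;

	// continue counting where the first region left off
	index -= count;
	return find_in_region(block, name, index, &count);
}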
static inline void
rld_unlock()
{
	recursive_lock_unlock(&sLock);
}