/* Release an L2 page table once it is no longer referenced: if none of the
 * L1 entries in this table's group still point at a page table, unlink the
 * backing page from the aspace's page list and return it to the pmm. */
static void put_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t l2_pa) {
    DEBUG_ASSERT(aspace);

    /* Scan the group of L1 entries sharing this L2 table page; if any entry
     * still references a page table, the table is in use — keep it. */
    const uint base = ROUNDDOWN(l1_index, L1E_PER_PAGE);
    for (uint slot = 0; slot < L1E_PER_PAGE; slot++) {
        uint32_t tt_entry = aspace->tt_virt[base + slot];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) ==
            MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            return;
        }
    }

    /* No remaining references — we can free this l2 table. */
    vm_page_t *page = paddr_to_vm_page(l2_pa);
    if (!page)
        panic("bad page table paddr 0x%lx\n", l2_pa);

    /* verify that it is in our page list */
    DEBUG_ASSERT(list_in_list(&page->node));
    list_delete(&page->node);

    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
    pmm_free_page(page);
}
/* Unlink a device-tree entry node from its list and release both the wrapped
 * dt_entry and the node itself. A node that is not currently linked is left
 * untouched (and not freed). */
static void dt_entry_list_delete(struct dt_entry_node *dt_node_member) {
    if (!list_in_list(&dt_node_member->node))
        return;

    list_delete(&dt_node_member->node);
    free(dt_node_member->dt_entry_m);
    free(dt_node_member);
}
// Replace |old| with |new_req| wherever |old| is currently tracked: either in
// its place in the request list, or as the active request. If |old| is in
// neither position, this is a no-op.
void PagerSource::SwapRequest(page_request_t* old, page_request_t* new_req) {
    Guard<fbl::Mutex> guard{&mtx_};
    ASSERT(!closed_);

    if (!list_in_list(&old->node)) {
        // Not queued; it may still be the request currently being serviced.
        if (old == active_request_) {
            active_request_ = new_req;
        }
        return;
    }

    list_replace_node(&old->node, &new_req->node);
}
/* Remove a timer from the pending timer queue. A timer that is not queued is
 * a no-op. The list manipulation is protected by a critical section. */
void timer_delete(timer_list_t *timer) {
    enter_critical_section();

    if (list_in_list(&timer->node))
        list_delete(&timer->node);

    exit_critical_section();
}
// Cancel an outstanding async request: if it is the request currently being
// serviced (queued on the port as |packet_|), cancel the queued packet;
// otherwise just unlink it from the pending request list.
void PagerSource::ClearAsyncRequest(page_request_t* request) { Guard<fbl::Mutex> guard{&mtx_}; ASSERT(!closed_); if (request == active_request_) { // Condition on whether or not we actually cancel the packet, to make sure // we don't race with a call to PagerSource::Free. if (port_->CancelQueued(&packet_)) { OnPacketFreedLocked(); } } else if (list_in_list(&request->node)) { /* not active — drop it from the pending list */ list_delete(&request->node); } }
/* Query whether a timer is currently queued on the timer list. */
bool timer_pending(const timer_list_t *timer) {
    const bool queued = list_in_list(&timer->node);
    return queued;
}