/* Write a 128-bit (long double) value to memory as two 64-bit words,
   low word first.  */
void
aarch64_set_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister a)
{
  TRACE_MEMORY (cpu,
                "write of long double %" PRIx64 " %" PRIx64 " to %" PRIx64,
                a.v[0], a.v[1], address);

  sim_core_write_unaligned_8 (cpu, 0, write_map, address, a.v[0]);
  sim_core_write_unaligned_8 (cpu, 0, write_map, address + 8, a.v[1]);
}
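A minimal sketch of the matching load helper, assuming the usual sim_core read API (sim_core_read_unaligned_8 and read_map); the name aarch64_get_mem_long_double and the out-parameter signature are assumptions, not taken from the snippet above:

/* Sketch, not necessarily the simulator's actual code: read the two
   64-bit halves back in the order the writer stores them (low word at
   ADDRESS, high word at ADDRESS + 8).  */
void
aarch64_get_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister *a)
{
  a->v[0] = sim_core_read_unaligned_8 (cpu, 0, read_map, address);
  a->v[1] = sim_core_read_unaligned_8 (cpu, 0, read_map, address + 8);
}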
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
static int swap_writepage(struct page *page)
{
	if (remove_exclusive_swap_page(page)) {
		UnlockPage(page);
		return 0;
	}
	TRACE_MEMORY(TRACE_EV_MEMORY_SWAP_OUT, (unsigned long) page);
	rw_swap_page(WRITE, page);
	return 0;
}
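For context, swap_writepage() is reached as the writepage method of the swap cache's address_space. A sketch of the 2.4-era hookup, assuming the usual swap_aops table in mm/swap_state.c (the sync_page entry is an assumption here, not taken from the snippet above):

/* Sketch of how swap_writepage() is installed (2.4-style designated
   initializers); the real table lives in mm/swap_state.c and may
   carry additional methods.  */
static struct address_space_operations swap_aops = {
	writepage: swap_writepage,
	sync_page: block_sync_page,
};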
ListPage::ListPage(dword dwFlags, Rect rcPosition, short itemHeight,
		   short headerHeight, short footerHeight)
{
	TRACE_MEMORY();

	m_dwFlags = dwFlags;
	m_rcList = rcPosition;
	m_itemHeight = itemHeight;
	m_headerHeight = headerHeight;
	m_footerHeight = footerHeight;
	m_fsBody = FNT_Size_1622;
	m_fsHeader = FNT_Size_1926;
	m_fsFooter = FNT_Size_1419;
	m_firstItemInView = 0;
	m_columnCount = 0;
	m_selectedItem = 0;
	m_bWidthsCalculated = false;
	m_iPageCount++;
}
static inline void
mem_error (sim_cpu *cpu, const char *message, uint64_t addr)
{
  TRACE_MEMORY (cpu, "ERROR: %s: %" PRIx64, message, addr);
}
static void __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	if (PageLRU(page))
		lru_cache_del(page);

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageSwapCache(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageLRU(page))
		BUG();
	if (PageActive(page))
		BUG();

	TRACE_MEMORY(TRACE_EV_MEMORY_PAGE_FREE, order);

	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
 back_local_freelist:

	zone = page->zone;

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone,buddy1))
			BUG();
		if (BAD_RANGE(zone,buddy2))
			BUG();

		memlist_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	memlist_add_head(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

 local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
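The buddy computation page_idx ^ -mask is the subtle part of the coalescing loop: with mask = ~0UL << order, the two's-complement negation -mask equals 1UL << order, so the XOR flips exactly the order-th bit of the page index. A self-contained demonstration of that identity (plain C, no kernel types; the index values are illustrative):

/* Standalone demo of the buddy-index arithmetic used above:
   with mask = ~0UL << order, -mask == 1UL << order, so
   page_idx ^ -mask flips bit `order` and yields the buddy index.  */
#include <stdio.h>

int main(void)
{
	unsigned long page_idx = 8;	/* a free block aligned to 1 << order */

	for (unsigned int order = 0; order < 4; order++) {
		unsigned long mask = (~0UL) << order;
		unsigned long buddy = page_idx ^ -mask;

		/* prints -mask = 1, 2, 4, 8 and buddies 9, 10, 12, 0 */
		printf("order %u: -mask = %lu, buddy of %lu is %lu\n",
		       order, -mask, page_idx, buddy);
	}
	return 0;
}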
/*
 * We hold the mm semaphore and the page_table_lock on entry and
 * should release the pagetable lock on exit.
 */
static int do_swap_page(struct mm_struct * mm,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, pte_t orig_pte, int write_access)
{
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);
	pte_t pte;
	int ret = 1;

	spin_unlock(&mm->page_table_lock);

	page = lookup_swap_cache(entry);
	if (!page) {
		TRACE_MEMORY(TRACE_EV_MEMORY_SWAP_IN, address);
		swapin_readahead(entry);
		page = read_swap_cache_async(entry);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte while
			 * we released the page table lock.
			 */
			int retval;
			spin_lock(&mm->page_table_lock);
			retval = pte_same(*page_table, orig_pte) ? -1 : 1;
			spin_unlock(&mm->page_table_lock);
			return retval;
		}

		/* Had to read the page from swap area: Major fault */
		ret = 2;
	}

	mark_page_accessed(page);

	lock_page(page);

	/*
	 * Back out if somebody else faulted in this pte while we
	 * released the page table lock.
	 */
	spin_lock(&mm->page_table_lock);
	if (!pte_same(*page_table, orig_pte)) {
		spin_unlock(&mm->page_table_lock);
		unlock_page(page);
		page_cache_release(page);
		return 1;
	}

	/* The page isn't present yet, go ahead with the fault. */

	swap_free(entry);
	if (vm_swap_full())
		remove_exclusive_swap_page(page);

	mm->rss++;
	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && can_share_swap_page(page))
		pte = pte_mkdirty(pte_mkwrite(pte));
	unlock_page(page);

	flush_page_to_ram(page);
	flush_icache_page(vma, page);
	set_pte(page_table, pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	spin_unlock(&mm->page_table_lock);
	return ret;
}
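The return value (1 for a minor fault served from the swap cache, 2 for a major fault that had to read the swap device) propagates up through handle_mm_fault(). A sketch of how a 2.4-style arch fault handler consumes it, following the common pattern in the arch fault.c files; treat the labels and variable names as illustrative:

/* Sketch of the caller side: counting minor/major faults from
   handle_mm_fault()'s return value, which do_swap_page() feeds.
   Labels (do_sigbus, out_of_memory) are illustrative.  */
switch (handle_mm_fault(mm, vma, address, write)) {
case 1:
	tsk->min_flt++;	/* minor fault: page found in swap cache */
	break;
case 2:
	tsk->maj_flt++;	/* major fault: read from swap device */
	break;
case 0:
	goto do_sigbus;
default:
	goto out_of_memory;
}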