/* HAP code to call when log_dirty is enabled. Returns 0 if no problem found. */
int hap_enable_log_dirty(struct domain *d)
{
    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
    return 0;
}
static void hap_clean_vram_tracking(struct domain *d)
{
    int i;
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    if ( !dirty_vram )
        return;

    /* set l1e entries of P2M table to be read-only. */
    for ( i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++ )
        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);

    flush_tlb_mask(&d->domain_dirty_cpumask);
}
static int hap_disable_vram_tracking(struct domain *d)
{
    int i;
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    if ( !dirty_vram )
        return -EINVAL;

    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table with normal mode */
    for ( i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++ )
        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);

    flush_tlb_mask(&d->domain_dirty_cpumask);
    return 0;
}
static int hap_enable_vram_tracking(struct domain *d)
{
    int i;
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    if ( !dirty_vram )
        return -EINVAL;

    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    for ( i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++ )
        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);

    flush_tlb_mask(&d->domain_dirty_cpumask);
    return 0;
}
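/*
 * Context sketch (hedged; not part of the excerpt above): in Xen's
 * xen/arch/x86/mm/hap/hap.c, the dirty-VRAM setup path installs the three
 * handlers above as the domain's log-dirty ops. The exact wrapper function
 * and call site vary between Xen versions; the registration looks roughly
 * like this:
 */
paging_log_dirty_init(d, hap_enable_vram_tracking,
                      hap_disable_vram_tracking,
                      hap_clean_vram_tracking);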
int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This
                 * may be unnecessary (e.g., page was GDT/LDT) but those
                 * circumstances should be very rare.
                 */
                cpumask_t mask = page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incr(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        else if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}
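/*
 * Usage sketch (hedged, illustrative only; `page` stands for any
 * struct page_info the caller already holds a general reference on):
 * a typed reference is taken with get_page_type() and dropped with
 * put_page_type(). A zero return means the type could not be taken,
 * i.e. a conflicting type is already held or the count would overflow.
 */
if ( !get_page_type(page, PGT_writable_page) )
    return 0;            /* an incompatible type is already held */
/* ... use the page as a writable page ... */
put_page_type(page);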
void hap_clean_dirty_bitmap(struct domain *d)
{
    /* set l1e entries of P2M table to be read-only. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
}
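/*
 * Context sketch (hedged): the global log-dirty handlers above are wired
 * up at domain initialisation. In the Xen tree, hap_domain_init() registers
 * them roughly as follows (hap_disable_log_dirty is the matching disable
 * handler, not shown in this excerpt; exact contents vary between versions):
 */
void hap_domain_init(struct domain *d)
{
    /* This domain will use HAP for log-dirty mode. */
    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
                          hap_clean_dirty_bitmap);
}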