/*
 * Tear down all architecture-specific state of a dying domain:
 * IOMMU mappings, P2M, virtual GIC and UART, the shared-info page,
 * the ACPI/EFI table copy (when built with ACPI), and the I/O space.
 *
 * NOTE(review): teardown order below is deliberate — do not reorder.
 */
void arch_domain_destroy(struct domain *d)
{
    /* IOMMU page table is shared with P2M, always call
     * iommu_domain_destroy() before p2m_teardown().
     */
    iommu_domain_destroy(d);
    p2m_teardown(d);
    domain_vgic_free(d);
    domain_vuart_free(d);
    /* Single page allocated at domain creation for shared_info. */
    free_xenheap_page(d->shared_info);
#ifdef CONFIG_ACPI
    /* Release the per-domain EFI/ACPI table copy, sized efi_acpi_len. */
    free_xenheap_pages(d->arch.efi_acpi_table,
                       get_order_from_bytes(d->arch.efi_acpi_len));
#endif
    domain_io_free(d);
}
void svm_destroy_vmcb(struct vcpu *v) { struct nestedvcpu *nv = &vcpu_nestedhvm(v); struct arch_svm_struct *arch_svm = &v->arch.hvm_svm; if ( nv->nv_n1vmcx != NULL ) free_vmcb(nv->nv_n1vmcx); if ( arch_svm->msrpm != NULL ) { free_xenheap_pages( arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE)); arch_svm->msrpm = NULL; } nv->nv_n1vmcx = NULL; nv->nv_n1vmcx_pa = VMCX_EADDR; arch_svm->vmcb = NULL; }
/*
 * Boot-time allocation of the shadow I/O permission bitmaps used by
 * nested HVM.  Returns 0 on success (or when HVM is unavailable),
 * -ENOMEM if any bitmap allocation fails (with full unwind).
 */
static int __init nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    /* No HVM support detected on this host: nothing to set up. */
    if ( !hvm_funcs.name )
        return 0;

    /* shadow_io_bitmaps can't be declared static because
     * they must fulfill hw requirements (page aligned section)
     * and doing so triggers the ASSERT(va >= XEN_VIRT_START)
     * in __virt_to_maddr()
     *
     * So as a compromise pre-allocate them when xen boots.
     * This function must be called from within start_xen() when
     * it is valid to use _xmalloc()
     */
    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            /* Unwind: free every bitmap allocated so far. */
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        /* All bits set => all I/O ports intercepted by default.
         * (~0U is converted to 0xff per memset's unsigned-char semantics.) */
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

    /* Allow direct guest access to ports 0x80 and 0xed — presumably the
     * conventional I/O-delay ports; confirm against hvm_io_bitmap setup. */
    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    return 0;
}
static void dump_console_ring_key(unsigned char key) { uint32_t idx, len, sofar, c; unsigned int order; char *buf; printk("'%c' pressed -> dumping console ring buffer (dmesg)\n", key); /* create a buffer in which we'll copy the ring in the correct order and NUL terminate */ order = get_order_from_bytes(conring_size + 1); buf = alloc_xenheap_pages(order, 0); if ( buf == NULL ) { printk("unable to allocate memory!\n"); return; } c = conringc; sofar = 0; while ( (c != conringp) ) { idx = CONRING_IDX_MASK(c); len = conringp - c; if ( (idx + len) > conring_size ) len = conring_size - idx; memcpy(buf + sofar, &conring[idx], len); sofar += len; c += len; } buf[sofar] = '\0'; sercon_puts(buf); video_puts(buf); free_xenheap_pages(buf, order); }
/*
 * Free per-vCPU architecture state: tear down the vCPU's timer
 * machinery first, then return its stack (STACK_ORDER pages) to the
 * xenheap.
 *
 * NOTE(review): timer teardown before stack free looks deliberate
 * (timer state may reference the vCPU) — keep this order.
 */
void vcpu_destroy(struct vcpu *v)
{
    vcpu_timer_destroy(v);
    free_xenheap_pages(v->arch.stack, STACK_ORDER);
}