/*
 * Main interface to do xen specific suspend/resume.
 *
 * Transitions the platform into ACPI sleep state @state (S1..S5 range as
 * bounded by ACPI_S_STATES_MAX) and, for S3, back out again.  Serialised
 * against concurrent PM requests via pm_lock.
 *
 * Returns 0 on success, -EINVAL for an out-of-range state, -EBUSY if another
 * PM operation is in flight, or the error from a failed suspend step.
 */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;
    unsigned long cr4;

    /* Reject S0 (not a sleep state) and anything beyond the supported max. */
    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    /* Only one PM transition at a time; do not block a concurrent caller. */
    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    BUG_ON(system_state != SYS_STATE_active);
    system_state = SYS_STATE_suspend;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.\n", state);

    freeze_domains();

    /* Put the original DMAR table signature back for the duration of sleep. */
    acpi_dmar_reinstate();

    if ( (error = disable_nonboot_cpus()) )
    {
        system_state = SYS_STATE_resume;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    /* Synchronous console output from here on: buffers won't survive sleep. */
    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        /* Fixed: message previously lacked the terminating newline. */
        printk(XENLOG_ERR "Some devices failed to power down.\n");
        system_state = SYS_STATE_resume;
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        /* Returns here on wakeup; count the implicit platform reset. */
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        /* Power-off: does not return on success. */
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    system_state = SYS_STATE_resume;

    /* Restore CR4 and EFER from cached values. */
    cr4 = read_cr4();
    /* Keep CR4.MCE clear until mcheck_init() has re-initialised MCA below. */
    write_cr4(cr4 & ~X86_CR4_MCE);
    write_efer(read_efer());

    device_power_up();

    mcheck_init(&boot_cpu_data, 0);
    write_cr4(cr4);

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        tboot_s3_error(error);

    /* Unwind of the suspend sequence above, in reverse order. */
 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    rcu_barrier();
    mtrr_aps_sync_begin();
    enable_nonboot_cpus();
    mtrr_aps_sync_end();
    adjust_vtd_irq_affinities();
    /* Hide the DMAR table again, matching acpi_dmar_reinstate() above. */
    acpi_dmar_zap();
    thaw_domains();
    system_state = SYS_STATE_active;
    spin_unlock(&pm_lock);
    return error;
}
/*
 * Build a filtered XSDT for a PVH dom0.
 *
 * The new XSDT contains the custom-built MADT at @madt_addr as its first
 * entry, followed by the addresses of the host tables that pass
 * pvh_acpi_table_allowed().  The table is placed in guest RAM below 4GB and
 * its guest physical address is returned in *addr.
 *
 * Returns 0 on success, -ENOMEM / -EINVAL / hvm_copy error on failure.
 */
static int __init pvh_setup_acpi_xsdt(struct domain *d, paddr_t madt_addr,
                                      paddr_t *addr)
{
    struct acpi_table_xsdt *xsdt;
    struct acpi_table_header *table;
    struct acpi_table_rsdp *rsdp;
    unsigned long size = sizeof(*xsdt);
    unsigned int i, j, num_tables = 0;
    paddr_t xsdt_paddr;
    int rc;

    /*
     * Restore original DMAR table signature, we are going to filter it from
     * the new XSDT that is presented to the guest, so it is no longer
     * necessary to have its signature zapped.
     */
    acpi_dmar_reinstate();

    /* Count the number of tables that will be added to the XSDT. */
    for( i = 0; i < acpi_gbl_root_table_list.count; i++ )
    {
        const char *sig = acpi_gbl_root_table_list.tables[i].signature.ascii;

        if ( pvh_acpi_table_allowed(sig) )
            num_tables++;
    }

    /*
     * No need to add or subtract anything because struct acpi_table_xsdt
     * includes one array slot already, and we have filtered out the original
     * MADT and we are going to add a custom built MADT.
     */
    size += num_tables * sizeof(xsdt->table_offset_entry[0]);

    xsdt = xzalloc_bytes(size);
    if ( !xsdt )
    {
        printk("Unable to allocate memory for XSDT table\n");
        rc = -ENOMEM;
        goto out;
    }

    /* Copy the native XSDT table header. */
    rsdp = acpi_os_map_memory(acpi_os_get_root_pointer(), sizeof(*rsdp));
    if ( !rsdp )
    {
        printk("Unable to map RSDP\n");
        rc = -EINVAL;
        goto out;
    }
    /*
     * NOTE(review): assumes the firmware provides a valid XSDT pointer
     * (ACPI 2.0+ RSDP) — confirm RSDT-only platforms cannot reach here.
     */
    xsdt_paddr = rsdp->xsdt_physical_address;
    acpi_os_unmap_memory(rsdp, sizeof(*rsdp));
    /* Only the header is needed; entries are rewritten below. */
    table = acpi_os_map_memory(xsdt_paddr, sizeof(*table));
    if ( !table )
    {
        printk("Unable to map XSDT\n");
        rc = -EINVAL;
        goto out;
    }
    xsdt->header = *table;
    acpi_os_unmap_memory(table, sizeof(*table));

    /* Add the custom MADT. */
    xsdt->table_offset_entry[0] = madt_addr;

    /* Copy the addresses of the rest of the allowed tables. */
    for( i = 0, j = 1; i < acpi_gbl_root_table_list.count; i++ )
    {
        const char *sig = acpi_gbl_root_table_list.tables[i].signature.ascii;

        if ( pvh_acpi_table_allowed(sig) )
            xsdt->table_offset_entry[j++] =
                acpi_gbl_root_table_list.tables[i].address;
    }

    xsdt->header.revision = 1;
    xsdt->header.length = size;
    /*
     * Calling acpi_tb_checksum here is a layering violation, but
     * introducing a wrapper for such simple usage seems overkill.
     */
    /* Adjust so the byte sum of the whole table (incl. checksum) is zero. */
    xsdt->header.checksum -= acpi_tb_checksum(ACPI_CAST_PTR(u8, xsdt), size);

    /* Place the new XSDT in guest memory space. */
    if ( pvh_steal_ram(d, size, 0, GB(4), addr) )
    {
        printk("Unable to find guest RAM for XSDT\n");
        rc = -ENOMEM;
        goto out;
    }

    /* Mark this region as E820_ACPI. */
    if ( pvh_add_mem_range(d, *addr, *addr + size, E820_ACPI) )
        /* Non-fatal: the table is still copied in; only the map entry fails. */
        printk("Unable to add XSDT region to memory map\n");

    rc = hvm_copy_to_guest_phys(*addr, xsdt, size, d->vcpu[0]);
    if ( rc )
    {
        printk("Unable to copy XSDT into guest memory\n");
        goto out;
    }

    rc = 0;

 out:
    /* Scratch copy is no longer needed once it lives in guest memory. */
    xfree(xsdt);

    return rc;
}