/*
 * Tighten the page protections on the EFI runtime region mappings, using
 * the best permission information the firmware provides (Memory Attribute
 * Table, falling back to EFI_PROPERTIES_TABLE-derived attributes).
 */
void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	/* Legacy 1:1 map: the only tweak possible is making code executable. */
	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation of
	 * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, because of the same reason.
	 */
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		/* Regions that are not write-back cacheable get PCD. */
		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		/* No-execute for XP-marked regions and for runtime data. */
		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		/* Writable unless marked read-only or it is runtime code. */
		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		/* Under SEV, runtime regions must be mapped encrypted. */
		if (sev_active())
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}
/*
 * To be called after the EFI page tables have been populated. If a memory
 * attributes table is available, its contents will be used to update the
 * mappings with tightened permissions as described by the table.
 * This requires the UEFI memory map to have already been populated with
 * virtual addresses.
 *
 * Returns 0 on success (or when there is nothing to do), a negative errno
 * on mapping failure, or the first non-zero value returned by @fn.
 */
int __init efi_memattr_apply_permissions(struct mm_struct *mm,
					 efi_memattr_perm_setter fn)
{
	efi_memory_attributes_table_t *tbl;
	int i, ret;

	/* A missing or truncated table means there is nothing to apply. */
	if (tbl_size <= sizeof(*tbl))
		return 0;

	/*
	 * We need the EFI memory map to be setup so we can use it to
	 * lookup the virtual addresses of all entries in the of EFI
	 * Memory Attributes table. If it isn't available, this
	 * function should not be called.
	 */
	if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
		return 0;

	tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
	if (!tbl) {
		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
		       efi.mem_attr_table);
		return -ENOMEM;
	}

	if (efi_enabled(EFI_DBG))
		pr_info("Processing EFI Memory Attributes table:\n");

	/* Stop at the first entry for which the permission setter fails. */
	for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
		efi_memory_desc_t md;
		unsigned long size;
		bool valid;
		char buf[64];

		valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
				       &md);
		size = md.num_pages << EFI_PAGE_SHIFT;
		/* Invalid entries are always logged (prefixed with '!'). */
		if (efi_enabled(EFI_DBG) || !valid)
			pr_info("%s 0x%012llx-0x%012llx %s\n",
				valid ? "" : "!", md.phys_addr,
				md.phys_addr + size - 1,
				efi_md_typeattr_format(buf, sizeof(buf), &md));

		if (valid)
			ret = fn(mm, &md);
	}

	memunmap(tbl);
	return ret;
}
/* Expose the EFI page tables in debugfs when runtime services are live. */
static int __init ptdump_init(void)
{
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		return ptdump_debugfs_register(&efi_ptdump_info,
					       "efi_page_tables");

	return 0;
}
void efi_setup_page_tables(void) { efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; if (!efi_enabled(EFI_OLD_MEMMAP)) efi_scratch.use_pgd = true; }
/*
 * Locate the ACPI RSDP: kexec-provided override first, then the EFI
 * configuration tables, then (optionally) the legacy memory scan.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		/* Prefer the ACPI 2.0+ table over the 1.0 one. */
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;

		printk(KERN_ERR PREFIX "System description tables not found\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}
/**
 * state - control system power state.
 *
 * show() returns what states are supported, which is hard-coded to
 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 * 'disk' (Suspend-to-Disk).
 *
 * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	int i;

	/* List every suspend state this platform can actually enter. */
	for (i = 0; i < PM_SUSPEND_MAX; i++) {
		if (pm_states[i] && valid_state(i))
			s += sprintf(s,"%s ", pm_states[i]);
	}
#endif
#ifdef CONFIG_HIBERNATION
	/* Hibernation is only advertised when Secure Boot is disabled. */
	if (!efi_enabled(EFI_SECURE_BOOT)) {
		s += sprintf(s, "%s\n", "disk");
	} else {
		s += sprintf(s, "\n");
	}
#else
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
#endif
	return (s - buf);
}
int efivars_sysfs_init(void) { struct kobject *parent_kobj = efivars_kobject(); int error = 0; if (!efi_enabled(EFI_RUNTIME_SERVICES)) return -ENODEV; /* No efivars has been registered yet */ if (!parent_kobj) return 0; printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, EFIVARS_DATE); efivars_kset = kset_create_and_add("vars", NULL, parent_kobj); if (!efivars_kset) { printk(KERN_ERR "efivars: Subsystem registration failed.\n"); return -ENOMEM; } efivar_init(efivars_sysfs_callback, NULL, false, true, &efivar_sysfs_list); error = create_efivars_bin_attributes(); if (error) { efivars_sysfs_exit(); return error; } INIT_WORK(&efivar_work, efivar_update_sysfs_entries); return 0; }
/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	/* The legacy 1:1 map reuses the kernel page tables directly. */
	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	/* Pre-populate the levels covering the EFI region's pgd slot. */
	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		/*
		 * With 5-level paging the p4d page allocated above must be
		 * freed explicitly; with 4 levels it is folded into the pgd.
		 */
		if (CONFIG_PGTABLE_LEVELS > 4)
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	return 0;
}
/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	pud_t *pud;
	gfp_t gfp_mask;

	/* The legacy 1:1 map reuses the kernel page tables directly. */
	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	/* Pre-populate the pgd entry that will hold the EFI region. */
	pgd = efi_pgd + pgd_index(EFI_VA_END);

	pud = pud_alloc_one(NULL, 0);
	if (!pud) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pgd_populate(NULL, pgd, pud);

	return 0;
}
void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) { const char *str[] = { "cold", "warm", "shutdown", "platform" }; int efi_mode, cap_reset_mode; if (!efi_enabled(EFI_RUNTIME_SERVICES)) return; switch (reboot_mode) { case REBOOT_WARM: case REBOOT_SOFT: efi_mode = EFI_RESET_WARM; break; default: efi_mode = EFI_RESET_COLD; break; } /* * If a quirk forced an EFI reset mode, always use that. */ if (efi_reboot_quirk_mode != -1) efi_mode = efi_reboot_quirk_mode; if (efi_capsule_pending(&cap_reset_mode)) { if (efi_mode != cap_reset_mode) printk(KERN_CRIT "efi: %s reset requested but pending " "capsule update requires %s reset... Performing " "%s reset.\n", str[efi_mode], str[cap_reset_mode], str[cap_reset_mode]); efi_mode = cap_reset_mode; } efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL); }
/*
 * Prepare the page tables for a physical-mode EFI call and return the
 * previous pgd state for efi_call_phys_epilog() to restore.
 */
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		/* New scheme: just switch CR3 to the dedicated EFI pgd. */
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	/* Save the pgd entries covering all of physical memory ... */
	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	/*
	 * NOTE(review): the kmalloc_array() result is not checked; on
	 * allocation failure the loop below dereferences NULL. A proper fix
	 * needs a caller-visible error path -- confirm against the callers.
	 */
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

	/* ... then alias the 1:1 physical range into the kernel tables. */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE),
			*pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}
static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int i; char *start = buf; if (efi_enabled(EFI_SECURE_BOOT)) { buf += sprintf(buf, "[%s]\n", "disabled"); return buf-start; } for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { if (!hibernation_modes[i]) continue; switch (i) { case HIBERNATION_SHUTDOWN: case HIBERNATION_REBOOT: #ifdef CONFIG_SUSPEND case HIBERNATION_SUSPEND: #endif break; case HIBERNATION_PLATFORM: if (hibernation_ops) break; /* not a valid mode, continue with loop */ continue; } if (i == hibernation_mode) buf += sprintf(buf, "[%s] ", hibernation_modes[i]); else buf += sprintf(buf, "%s ", hibernation_modes[i]); } buf += sprintf(buf, "\n"); return buf-start; }
/* * Restart requires that the secondary CPUs stop performing any activity * while the primary CPU resets the system. Systems with multiple CPUs must * provide a HW restart implementation, to ensure that all CPUs reset at once. * This is required so that any code running after reset on the primary CPU * doesn't have to co-ordinate with other CPUs to ensure they aren't still * executing pre-reset code, and using RAM that the primary CPU's code wishes * to use. Implementing such co-ordination would be essentially impossible. */ void machine_restart(char *cmd) { /* Disable interrupts first */ local_irq_disable(); smp_send_stop(); /* * UpdateCapsule() depends on the system being reset via * ResetSystem(). */ if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_reboot(reboot_mode, NULL); /* Now call the architecture specific reboot code. */ if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); else do_kernel_restart(cmd); /* * Whoops - the architecture was unable to reboot. */ printk("Reboot failed -- System halted\n"); while (1); }
/* Legacy 1:1 map only: mark EFI runtime code pages executable. */
void __init efi_runtime_mkexec(void)
{
	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
		runtime_code_page_mkexec();
}
/* Dump whichever page table hierarchy the EFI mappings live in. */
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	pgd_t *root = efi_enabled(EFI_OLD_MEMMAP) ? swapper_pg_dir : efi_pgd;

	ptdump_walk_pgd_level(NULL, root);
#endif
}
/* Register the EFI RTC platform device when runtime services exist. */
static int __init rtc_init(void)
{
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	if (platform_device_register(&rtc_efi_dev) < 0)
		pr_err("unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
/* Register efivarfs once EFI runtime services and efivars are present. */
static __init int efivarfs_init(void)
{
	if (!efi_enabled(EFI_RUNTIME_SERVICES) || !efivars_kobject())
		return 0;

	return register_filesystem(&efivarfs_type);
}
/*
 * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
 * non-early mapping of the UEFI system table and virtual mappings for all
 * EFI_MEMORY_RUNTIME regions.
 */
static int __init arm_enable_runtime_services(void)
{
	u64 region_bytes;

	if (!efi_enabled(EFI_BOOT)) {
		pr_info("EFI services will not be available.\n");
		return 0;
	}

	/* Drop the early mapping and re-establish a late one. */
	efi_memmap_unmap();

	region_bytes = efi.memmap.desc_size * efi.memmap.nr_map;
	if (efi_memmap_init_late(efi.memmap.phys_map, region_bytes)) {
		pr_err("Failed to remap EFI memory map\n");
		return 0;
	}

	if (efi_runtime_disabled()) {
		pr_info("EFI runtime services will be disabled.\n");
		return 0;
	}

	/* Already provided through a paravirt interface? Nothing to do. */
	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
		pr_info("EFI runtime services access via paravirt.\n");
		return 0;
	}

	pr_info("Remapping and enabling EFI services.\n");

	if (!efi_virtmap_init()) {
		pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
		return -ENOMEM;
	}

	/* Set up runtime services function pointers */
	efi_native_runtime_setup();
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	return 0;
}
/* Route power-off through EFI when the platform requires it. */
static int __init efi_shutdown_init(void)
{
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return -ENODEV;

	if (efi_poweroff_required()) {
		/* Remember the previous handler before overriding it. */
		orig_pm_power_off = pm_power_off;
		pm_power_off = efi_power_off;
	}

	return 0;
}
/* Register the misc device backing the EFI capsule loader interface. */
static int __init efi_capsule_loader_init(void)
{
	int rc;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return -ENODEV;

	rc = misc_register(&efi_capsule_misc);
	if (rc)
		pr_err("Unable to register capsule loader device\n");

	return rc;
}
/*
 * Map one EFI_MEMORY_RUNTIME region into the EFI page tables: always at
 * its 1:1 physical address, and additionally at a descending virtual
 * address (tracked in efi_va) which becomes the region's virt_addr.
 */
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native () && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	/* Carve the next slot out of the downward-growing EFI VA space. */
	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		/* Keep allocations strictly descending. */
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	/* Ran below the bottom of the reserved EFI VA window. */
	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
/*
 * Detect Hyper-V features/hints via CPUID and hook the platform
 * accordingly (LAPIC timer frequency, clocksource, shutdown ops, NMI).
 */
static void __init ms_hyperv_init_platform(void)
{
	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("HyperV: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64 hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_frequency = hv_lapic_frequency;
		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	mark_tsc_unstable("running on Hyper-V");

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * 0x61 port.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
/* * Add low kernel mappings for passing arguments to EFI functions. */ void efi_sync_low_kernel_mappings(void) { unsigned num_pgds; pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); if (efi_enabled(EFI_OLD_MEMMAP)) return; num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); memcpy(pgd + pgd_index(PAGE_OFFSET), init_mm.pgd + pgd_index(PAGE_OFFSET), sizeof(pgd_t) * num_pgds); }
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	/* Copy the shared pgd range from the kernel tables wholesale. */
	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pud_efi = pud_offset(pgd_efi, 0);

	pgd_k = pgd_offset_k(EFI_VA_END);
	pud_k = pud_offset(pgd_k, 0);

	/* PUD entries below the EFI window ... */
	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	/* ... and PUD entries above it. */
	pud_efi = pud_offset(pgd_efi, EFI_VA_START);
	pud_k = pud_offset(pgd_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
/* Hook reboot notification so the bootloader-control variable gets set. */
static int __init efibc_init(void)
{
	int rc = 0;

	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
		rc = register_reboot_notifier(&efibc_reboot_notifier);
		if (rc)
			pr_err("efibc: unable to register reboot notifier\n");
	}

	return rc;
}
/*
 * Undo efi_call_phys_prolog(): restore the saved page-table state after a
 * physical-mode EFI call and release the transient page-table pages.
 */
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		/* New scheme: prolog only swapped CR3, so swap it back. */
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		/* Free the pud pages hanging off each present p4d entry. */
		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
/*
 * A number of config table entries get remapped to virtual addresses
 * after entering EFI virtual mode. However, the kexec kernel requires
 * their physical addresses therefore we pass them via setup_data and
 * correct those entries to their respective physical addresses here.
 *
 * Currently only handles smbios which is necessary for some firmware
 * implementation.
 */
int __init efi_reuse_config(u64 tables, int nr_tables)
{
	int i, sz, ret = 0;
	void *p, *tablep;
	struct efi_setup_data *data;

	/* Only relevant when booted via kexec with EFI setup_data. */
	if (!efi_setup)
		return 0;

	if (!efi_enabled(EFI_64BIT))
		return 0;

	data = early_memremap(efi_setup, sizeof(*data));
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	if (!data->smbios)
		goto out_memremap;

	sz = sizeof(efi_config_table_64_t);

	p = tablep = early_memremap(tables, nr_tables * sz);
	if (!p) {
		pr_err("Could not map Configuration table!\n");
		ret = -ENOMEM;
		goto out_memremap;
	}

	/*
	 * NOTE(review): the loop bound is efi.systab->nr_tables while the
	 * mapping above is sized by the nr_tables argument -- confirm that
	 * callers always pass the same count, otherwise this can walk past
	 * the end of the mapping.
	 */
	for (i = 0; i < efi.systab->nr_tables; i++) {
		efi_guid_t guid;

		guid = ((efi_config_table_64_t *)p)->guid;

		/* Rewrite the SMBIOS entry back to its physical address. */
		if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
			((efi_config_table_64_t *)p)->table = data->smbios;
		p += sz;
	}
	early_memunmap(tablep, nr_tables * sz);

out_memremap:
	early_memunmap(data, sizeof(*data));
out:
	return ret;
}
/*
 * Undo efi_call_phys_prolog() for the legacy 1:1 map: restore the pgd
 * entries from the global save_pgd array and re-enable interrupts.
 */
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);

	/* Nothing to undo unless the legacy 1:1 map path was taken. */
	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);

	kfree(save_pgd);

	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
/*
 * Report whether the platform booted with UEFI Secure Boot enabled.
 * The mode is determined once (boot_params first, then EFI variables)
 * and cached for subsequent calls.
 */
bool arch_ima_get_secureboot(void)
{
	static enum efi_secureboot_mode sb_mode;
	static bool initialized;

	if (!initialized && efi_enabled(EFI_BOOT)) {
		sb_mode = boot_params.secure_boot;

		if (sb_mode == efi_secureboot_mode_unset)
			sb_mode = get_sb_mode();
		initialized = true;
	}

	return sb_mode == efi_secureboot_mode_enabled;
}
static enum efi_secureboot_mode get_sb_mode(void) { efi_char16_t efi_SecureBoot_name[] = L"SecureBoot"; efi_char16_t efi_SetupMode_name[] = L"SecureBoot"; efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; efi_status_t status; unsigned long size; u8 secboot, setupmode; size = sizeof(secboot); if (!efi_enabled(EFI_RUNTIME_SERVICES)) { pr_info("ima: secureboot mode unknown, no efi\n"); return efi_secureboot_mode_unknown; } /* Get variable contents into buffer */ status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid, NULL, &size, &secboot); if (status == EFI_NOT_FOUND) { pr_info("ima: secureboot mode disabled\n"); return efi_secureboot_mode_disabled; } if (status != EFI_SUCCESS) { pr_info("ima: secureboot mode unknown\n"); return efi_secureboot_mode_unknown; } size = sizeof(setupmode); status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid, NULL, &size, &setupmode); if (status != EFI_SUCCESS) /* ignore unknown SetupMode */ setupmode = 0; if (secboot == 0 || setupmode == 1) { pr_info("ima: secureboot mode disabled\n"); return efi_secureboot_mode_disabled; } pr_info("ima: secureboot mode enabled\n"); return efi_secureboot_mode_enabled; }