static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
    thread_t    thread;
    pmap_t      pmap;
    vm_size_t   bytes_copied;
    int         error = 0;
    boolean_t   istate = FALSE;
    boolean_t   recursive_CopyIOActive;
#if KDEBUG
    int         debug_type = 0xeff70010;
    debug_type += (copy_type << 2);
#endif

    thread = current_thread();

    KERNEL_DEBUG(debug_type | DBG_FUNC_START,
                 (unsigned)(user_addr >> 32), (unsigned)user_addr,
                 nbytes, thread->machine.copyio_state, 0);

    if (nbytes == 0)
        goto out;

    pmap = thread->map->pmap;

    if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS) &&
        ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
        panic("Invalid copy parameter, copy type: %d, kernel address: %p",
              copy_type, kernel_addr);
    }

    /* Sanity and security check for addresses to/from a user */
    if (((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
        ((nbytes && (user_addr + nbytes <= user_addr)) ||
         ((user_addr + nbytes) > vm_map_max(thread->map)))) {
        error = EFAULT;
        goto out;
    }

    /*
     * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
     * its own pmap and cr3 rather than the user's -- so that wild accesses
     * from kernel or kexts can be trapped. So, during copyin and copyout,
     * we need to switch back to the user's map/cr3. The thread is flagged
     * "CopyIOActive" at this time so that if the thread is pre-empted,
     * we will later restore the correct cr3.
     */
    recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
    thread->machine.specFlags |= CopyIOActive;
    user_access_enable();
    if (no_shared_cr3) {
        istate = ml_set_interrupts_enabled(FALSE);
        if (get_cr3_base() != pmap->pm_cr3)
            set_cr3_raw(pmap->pm_cr3);
    }

    /*
     * Ensure that we're running on the target thread's cr3.
     */
    if ((pmap != kernel_pmap) && !use_kernel_map &&
        (get_cr3_base() != pmap->pm_cr3)) {
        panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
              copy_type, (void *)user_addr, kernel_addr, nbytes,
              lencopied, use_kernel_map,
              (void *)get_cr3_raw(), (void *)pmap->pm_cr3);
    }
    if (no_shared_cr3)
        (void) ml_set_interrupts_enabled(istate);

    KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
                 (unsigned)kernel_addr, nbytes, 0, 0);

    switch (copy_type) {

    case COPYIN:
        error = _bcopy((const void *) user_addr,
                       kernel_addr,
                       nbytes);
        break;

    case COPYOUT:
        error = _bcopy(kernel_addr,
                       (void *) user_addr,
                       nbytes);
        break;

    case COPYINPHYS:
        error = _bcopy((const void *) user_addr,
                       PHYSMAP_PTOV(kernel_addr),
                       nbytes);
        break;

    case COPYOUTPHYS:
        error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
                       (void *) user_addr,
                       nbytes);
        break;

    case COPYINSTR:
        error = _bcopystr((const void *) user_addr,
                          kernel_addr,
                          (int) nbytes,
                          &bytes_copied);

        /*
         * lencopied should be updated on success
         * or ENAMETOOLONG... but not EFAULT
         */
        if (error != EFAULT)
            *lencopied = bytes_copied;

        if (error) {
#if KDEBUG
            nbytes = *lencopied;
#endif
            break;
        }
        if (*(kernel_addr + bytes_copied - 1) == 0) {
            /*
             * we found a NULL terminator... we're done
             */
#if KDEBUG
            nbytes = *lencopied;
#endif
            break;
        } else {
            /*
             * no more room in the buffer and we haven't
             * yet come across a NULL terminator
             */
#if KDEBUG
            nbytes = *lencopied;
#endif
            error = ENAMETOOLONG;
            break;
        }
    }

    user_access_disable();
    if (!recursive_CopyIOActive) {
        thread->machine.specFlags &= ~CopyIOActive;
    }
    if (no_shared_cr3) {
        istate = ml_set_interrupts_enabled(FALSE);
        if (get_cr3_raw() != kernel_pmap->pm_cr3)
            set_cr3_raw(kernel_pmap->pm_cr3);
        (void) ml_set_interrupts_enabled(istate);
    }

out:
    KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
                 (unsigned)kernel_addr, (unsigned)nbytes, error, 0);

    return (error);
}
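/*
 * For context, copyio() is not called directly by the rest of the kernel;
 * thin wrappers select the copy_type and default to the current thread's
 * map. The sketch below is illustrative, assuming the standard BSD
 * copyin/copyout/copyinstr entry points dispatch here -- the exact wrapper
 * set and bodies in this file may differ.
 */
int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
    return (copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
    return (copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr,
                   nbytes, NULL, 0));
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes,
          vm_size_t *lencopied)
{
    *lencopied = 0;
    return (copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0));
}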
kern_return_t
pal_efi_call_in_32bit_mode(uint32_t func,
                           struct pal_efi_registers *efi_reg,
                           void *stack_contents,
                           size_t stack_contents_size, /* 16-byte multiple */
                           uint32_t *efi_status)
{
    DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n",
        func, efi_reg, stack_contents, stack_contents_size, efi_status);

    if (func == 0) {
        return KERN_INVALID_ADDRESS;
    }

    if ((efi_reg == NULL)
        || (stack_contents == NULL)
        || (stack_contents_size % 16 != 0)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!gPEEFISystemTable || !gPEEFIRuntimeServices) {
        return KERN_NOT_SUPPORTED;
    }

    DBG("pal_efi_call_in_32bit_mode() efi_reg:\n");
    DBG("  rcx: 0x%016llx\n", efi_reg->rcx);
    DBG("  rdx: 0x%016llx\n", efi_reg->rdx);
    DBG("  r8:  0x%016llx\n", efi_reg->r8);
    DBG("  r9:  0x%016llx\n", efi_reg->r9);
    DBG("  rax: 0x%016llx\n", efi_reg->rax);

    DBG("pal_efi_call_in_32bit_mode() stack:\n");
#if PAL_DEBUG
    size_t i;
    for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) {
        uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i);
        DBG("  %p: 0x%08x\n", p, *p);
    }
#endif

#ifdef __x86_64__
    /*
     * Ensure no interruptions.
     * Taking a spinlock for serialization is technically unnecessary
     * because the EFIRuntime kext should serialize.
     */
    boolean_t istate = ml_set_interrupts_enabled(FALSE);
    simple_lock(&pal_efi_lock);

    /*
     * Switch to special page tables with the entire high kernel space
     * double-mapped into the bottom 4GB.
     *
     * NB: We assume that all data exchanged with RuntimeServices is
     * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular,
     * kexts loaded in the basement (below VM_MIN_ADDRESS) cannot pass
     * static data. Kernel stack and heap space is OK.
     */
    MARK_CPU_IDLE(cpu_number());
    pal_efi_saved_cr3 = get_cr3_raw();
    pal_efi_saved_cr0 = get_cr0();
    IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX];
    IDPML4[0]                 = IdlePML4[KERNEL_PML4_INDEX];
    clear_ts();
    set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4));

    swapgs();                   /* Save kernel's GS base */

    /* Set segment state ready for compatibility mode */
    set_gs(NULL_SEG);
    set_fs(NULL_SEG);
    set_es(KERNEL_DS);
    set_ds(KERNEL_DS);
    set_ss(KERNEL_DS);

    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);

    /* Restore NULL segment state */
    set_ss(NULL_SEG);
    set_es(NULL_SEG);
    set_ds(NULL_SEG);

    swapgs();                   /* Restore kernel's GS base */

    /* Restore the 64-bit user GS base we just destroyed */
    wrmsr64(MSR_IA32_KERNEL_GS_BASE,
            current_cpu_datap()->cpu_uber.cu_user_gs_base);

    /* End of mapping games */
    set_cr3_raw(pal_efi_saved_cr3);
    set_cr0(pal_efi_saved_cr0);

    MARK_CPU_ACTIVE(cpu_number());

    simple_unlock(&pal_efi_lock);
    ml_set_interrupts_enabled(istate);
#else
    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);
#endif

    *efi_status = (uint32_t)efi_reg->rax;
    DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status);

    return KERN_SUCCESS;
}
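/*
 * Illustrative sketch (hypothetical, not part of this file): how a caller
 * might invoke a 32-bit EFI runtime service through the routine above.
 * The assumption is that 32-bit EFI services take their arguments on the
 * stack, so the caller packs them into a buffer whose size is a 16-byte
 * multiple, with the first argument at the lowest address (cdecl layout);
 * the service's return value comes back via efi_status. The names
 * demo_call_efi32_service, service_entry, arg0, and arg1 are invented
 * for illustration.
 */
static kern_return_t
demo_call_efi32_service(uint32_t service_entry, uint32_t arg0, uint32_t arg1)
{
    struct pal_efi_registers regs = { 0 };  /* GPRs unused by the stack-based 32-bit convention */
    uint32_t stack[4] __attribute__((aligned(16))); /* 16 bytes: smallest 16-byte multiple */
    uint32_t status;
    kern_return_t kr;

    stack[0] = arg0;    /* first argument at the lowest address */
    stack[1] = arg1;
    stack[2] = 0;       /* unused slots zeroed for padding */
    stack[3] = 0;

    kr = pal_efi_call_in_32bit_mode(service_entry, &regs,
                                    stack, sizeof(stack), &status);
    if (kr != KERN_SUCCESS)
        return kr;

    /* status is the EFI_STATUS the service returned in %eax */
    return (status == 0 /* EFI_SUCCESS */) ? KERN_SUCCESS : KERN_FAILURE;
}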