// Driver entry point. Initializes globals and registers driver node in /dev. kern_return_t pmem_start(kmod_info_t * ki, void *d) { int error = 0; pmem_log("Loading /dev/%s driver", pmem_pmem_devname); // Memory allocations are tagged to prevent leaks pmem_tag = OSMalloc_Tagalloc(pmem_tagname, OSMT_DEFAULT); // Allocate one page for zero padding of illegal read requests pmem_zero_page = static_cast<uint8_t *>(OSMalloc(PAGE_SIZE, pmem_tag)); if (pmem_zero_page == NULL) { pmem_error("Failed to allocate memory for page buffer"); return pmem_cleanup(KERN_FAILURE); } bzero(pmem_zero_page, PAGE_SIZE); // Access the boot arguments through the platform export, // and parse the systems physical memory configuration. boot_args * ba = reinterpret_cast<boot_args *>(PE_state.bootArgs); pmem_physmem_size = ba->PhysicalMemorySize; pmem_mmap = reinterpret_cast<EfiMemoryRange *>(ba->MemoryMap + pmem_kernel_voffset); pmem_mmap_desc_size = ba->MemoryMapDescriptorSize; pmem_mmap_size = ba->MemoryMapSize; pmem_log("Size of physical memory:%lld", pmem_physmem_size); pmem_log("Size of physical pages:%d (PAGE_SHIFT=%d, PAGE_MASK=%#016x)", PAGE_SIZE, PAGE_SHIFT, PAGE_MASK); pmem_log("Phys. 
Memory map at:%#016llx (size:%lld desc_size:%d)", pmem_mmap, pmem_mmap_size, pmem_mmap_desc_size); pmem_log("Number of segments in memory map: %d", pmem_mmap_size / pmem_mmap_desc_size); // Install switch table pmem_devmajor = cdevsw_add(-1, &pmem_cdevsw); if (pmem_devmajor == -1) { pmem_error("Failed to create character device"); return pmem_cleanup(KERN_FAILURE); } // Create physical memory device file pmem_log("Adding node /dev/%s", pmem_pmem_devname); pmem_devpmemnode = devfs_make_node(makedev(pmem_devmajor, pmem_dev_pmem_minor), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0660, pmem_pmem_devname); if (pmem_devpmemnode == NULL) { pmem_error("Failed to create /dev/%s node", pmem_pmem_devname); return pmem_cleanup(KERN_FAILURE); } pmem_log("obtaining kernel dtb pointer"); __asm__ __volatile__("movq %%cr3, %0" :"=r"(pmem_dtb)); // Only bits 51-12 (inclusive) in cr3 are part of the dtb pointer pmem_dtb &= ~PAGE_MASK; pmem_log("kernel dtb: %#016llx", pmem_dtb); pmem_log("initializing pte_mmap module"); pmem_log("pmem driver loaded, physical memory available in /dev/%s", pmem_pmem_devname); return error; }
// Determines if a given physical address is inside a valid page frame. static boolean_t pmem_page_valid(addr64_t page) { // Make sure its inside the physical address range. if (page > (pmem_physmem_size - PAGE_SIZE)) { pmem_log("Warning, page %#016llx is not inside valid range", page); return FALSE; } return TRUE; }
// Map a specific page into kernel virtual memory. // Supports multiple methods of mapping, the one used is specified in // pmem_mmap_method. Due to the nature of the mapping, old mappings become // invalid after calling this function again with a new mapping. // // Args: // page: The physical address of the page to map. // vaddr: A pointer to the pointer that will be set with the address where the // page has been mapped. // // Returns: // KERN_SUCCESS or KERN_FAILURE. // static kern_return_t pmem_map_physical_page(uint64_t page, void **vaddr) { kern_return_t status = KERN_FAILURE; static IOMemoryDescriptor *page_desc = NULL; static IOMemoryMap *page_map = NULL; // Freeing these objects destroys the created mapping, so we have to keep a // reference around until the next call. if (page_desc != NULL) { page_desc->release(); page_desc = NULL; } if (page_map != NULL) { page_map->release(); page_map = NULL; } switch (pmem_mmap_method) { case PMEM_MMAP_IOKIT: page_desc = IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE, kIODirectionIn); if (!page_desc) { goto error; } page_map = page_desc->createMappingInTask(kernel_task, 0, kIODirectionIn, 0, 0); if (!page_map) { goto error; } *vaddr = reinterpret_cast<void *>(page_map->getAddress()); break; case PMEM_MMAP_PTE: if (pte_mmap == NULL) { pte_mmap = pte_mmap_osx_new(); if (pte_mmap == NULL) { pmem_log("unable to initialize pte_mmap module, aborting..."); return KERN_FAILURE; } } if (pte_mmap->remap_page(pte_mmap, page) != PTE_SUCCESS) { goto error; } *vaddr = pte_mmap->rogue_page.pointer; break; default: goto error; } status = KERN_SUCCESS; error: return status; }
// Handles ioctl's from userspace. See all defined ioctl codes in pmem_ioctls.h. static kern_return_t pmem_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { int error = 0; switch (cmd) { case PMEM_IOCTL_GET_MMAP_SIZE: pmem_log("Passing size of memory map to user space"); // copyout() is handled by the kernel, as we get passed an integral value *(reinterpret_cast<int32_t *>(data)) = pmem_mmap_size; break; case PMEM_IOCTL_GET_MMAP_DESC_SIZE: pmem_log("Passing size of memory map descriptor to user space"); // copyout() is handled by the kernel, as we get passed an integral value *(reinterpret_cast<int32_t *>(data)) = pmem_mmap_desc_size; break; case PMEM_IOCTL_GET_MMAP: // Boot arguments are obtained through the platform expert, // which in turn got them handed by the EFI. pmem_log("Copying memory map to user space"); // in this case we get a pointer so we must use copyout() error = copyout(pmem_mmap, *(reinterpret_cast<uint64_t *>(data)), pmem_mmap_size); if (error != 0) { pmem_error("Error %d, copyout failed for memory map", error); return EFAULT; } break; case PMEM_IOCTL_GET_DTB: *(reinterpret_cast<int64_t *>(data)) = pmem_dtb; break; case PMEM_IOCTL_SET_MMAP_METHOD: // Verify if the given method is valid switch (*(reinterpret_cast<int32_t *>(data))) { case PMEM_MMAP_IOKIT: pmem_log("Setting mmap method to IOKit"); pmem_mmap_method = *(reinterpret_cast<int32_t *>(data)); break; case PMEM_MMAP_PTE: pmem_log("Setting mmap method to manual PTE remapping"); pmem_mmap_method = *(reinterpret_cast<int32_t *>(data)); break; default: pmem_log("Unknown mmap method %lld, ignoring ioctl SET_MMAP_METHOD", *(reinterpret_cast<int32_t *>(data))); } break; default: pmem_error("Illegal ioctl %08lx", cmd); return EFAULT; } return KERN_SUCCESS; }
// Driver entry point. Initializes globals and registers driver node in /dev. kern_return_t chipsec_start(kmod_info_t * ki, void *d) { int error = 0; pmem_log("Loading /dev/%s driver", chipsec_devname); // Memory allocations are tagged to prevent leaks pmem_tag = OSMalloc_Tagalloc(pmem_tagname, OSMT_DEFAULT); // Allocate one page for zero padding of illegal read requests pmem_zero_page = static_cast<uint8_t *>(OSMalloc(PAGE_SIZE, pmem_tag)); if (pmem_zero_page == NULL) { pmem_error("Failed to allocate memory for page buffer"); return pmem_cleanup(KERN_FAILURE); } bzero(pmem_zero_page, PAGE_SIZE); // Install the character device chipsec_dev_major = cdevsw_add(-1, &pmem_cdevsw); if (chipsec_dev_major == -1) { pmem_error("Failed to create character device"); return pmem_cleanup(KERN_FAILURE); } // Create physical memory device file pmem_log("Adding node /dev/%s", chipsec_devname); pmem_devpmemnode = devfs_make_node(makedev(chipsec_dev_major, chipsec_dev_minor), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0660, chipsec_devname); if (pmem_devpmemnode == NULL) { pmem_error("Failed to create /dev/%s node", chipsec_devname); return pmem_cleanup(KERN_FAILURE); } pmem_log("pmem driver loaded, physical memory available in /dev/%s", chipsec_devname); return error; }
// Copy the requested amount to userspace if it doesn't cross page boundaries // or memory mapped io. If it does, stop at the boundary. Will copy zeroes // if the given physical address is not backed by physical memory. // // args: uio is the userspace io request object // return: number of bytes copied successfully // static uint64_t pmem_partial_read(struct uio *uio, addr64_t start_addr, addr64_t end_addr) { // Separate page and offset uint64_t page_offset = start_addr & PAGE_MASK; addr64_t page = trunc_page_64(start_addr); // don't copy across page boundaries uint32_t chunk_len = (uint32_t)MIN(PAGE_SIZE - page_offset, end_addr - start_addr); // Prepare the page for IOKit IOMemoryDescriptor *page_desc = ( IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE, kIODirectionIn)); if (page_desc == NULL) { pmem_error("Can't read from %#016llx, address not in physical memory range", start_addr); // Skip this range as it is not even in the physical address space return chunk_len; } else { // Map the page containing address into kernel address space. IOMemoryMap *page_map = ( page_desc->createMappingInTask(kernel_task, 0, kIODirectionIn, 0, 0)); // Check if the mapping succeded. if (!page_map) { pmem_error("page %#016llx could not be mapped into the kernel, " "zero padding return buffer", page); // Zero pad this chunk, as it is not inside a valid page frame. uiomove64((addr64_t)pmem_zero_page + page_offset, (uint32_t)chunk_len, uio); } else { // Successfully mapped page, copy contents... pmem_log("partial_read"); log_addr(page_map->getAddress(), 64, "page_map->getAddress()"); log_addr(page_offset, 64, "page_offset"); uiomove64(page_map->getAddress() + page_offset, (uint32_t)chunk_len, uio); page_map->release(); } page_desc->release(); } return chunk_len; }
// Driver cleanup function, frees all memory and removes device nodes. kern_return_t pmem_stop(kmod_info_t *ki, void *d) { pmem_log("Unloading /dev/%s driver", pmem_pmem_devname); return pmem_cleanup(KERN_SUCCESS); }
/* Handles ioctl's from userspace. See ioctl codes in
   chipsec-common/chipsec_ioctl.h

   Each case copies the request struct in from 'data', performs the requested
   hardware access, and copies the result back out. Returns KERN_SUCCESS (0)
   or a positive errno value (BSD ioctl convention). */
static kern_return_t pmem_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag,
                                struct proc *p) {
  //TODO (dynamically allocate these)
  pci_msg_t kpci;
  mmio_msg_t kmmio;
  cr_msg_t kcr;
  io_msg_t kio;
  msr_msg_t kmsr;
  cpuid_msg_t kcpuid;
  swsmi_msg_t kswsmi;
  hypercall_msg_t khypercall;
  msgbus_msg_t kmsgbus;
  cpudes_msg_t kcpudes;
  alloc_pmem_msg_t kalloc_pmem;

  // BUGFIX: cmd is u_long; %x expects unsigned int (printf UB).
  pmem_log("cmd = %lx", cmd);

  switch (cmd) {
    case CHIPSEC_IOC_RDPCI:
      pmem_log("RDPCI");
      log_addr((uint64_t) data, 64, "data");
      log_addr((uint64_t) &kpci, 64, "&krdpci");
      bcopy(data, &kpci, sizeof(pci_msg_t));
      pmem_log("ReadPCICfg(%lx, %lx, %lx, %lx, %lx)", kpci.bus, kpci.device,
               kpci.function, kpci.offset, kpci.length);
      kpci.value = ReadPCICfg(kpci.bus, kpci.device, kpci.function,
                              kpci.offset, kpci.length);
      pmem_log("kpci.value = %08x", kpci.value);
      bcopy(&kpci, data, sizeof(pci_msg_t));
      break;

    case CHIPSEC_IOC_WRPCI:
      pmem_log("WRPCI");
      bcopy(data, &kpci, sizeof(pci_msg_t));
      pmem_log("WritePCICfg(%lx, %lx, %lx, %lx, %lx, %lx)", kpci.bus,
               kpci.device, kpci.function, kpci.offset, kpci.length,
               kpci.value);
      WritePCICfg(kpci.bus, kpci.device, kpci.function, kpci.offset,
                  kpci.length, kpci.value);
      break;

    case CHIPSEC_IOC_RDMMIO:
      pmem_log("RDMMIO");
      bcopy(data, &kmmio, sizeof(mmio_msg_t));
      pmem_log("ReadMMIO(%lx, %x)", kmmio.addr, kmmio.length);
      kmmio.value = ReadMMIO(kmmio.addr, kmmio.length);
      pmem_log("val = %08llx", kmmio.value);
      bcopy(&kmmio, data, sizeof(mmio_msg_t));
      break;

    case CHIPSEC_IOC_WRMMIO:
      pmem_log("WRMMIO");
      bcopy(data, &kmmio, sizeof(mmio_msg_t));
      pmem_log("WriteMMIO(%lx, %x, %x)", kmmio.addr, kmmio.length,
               (uint32_t) kmmio.value);
      WriteMMIO(kmmio.addr, kmmio.length, kmmio.value);
      break;

    case CHIPSEC_IOC_RDCR:
      pmem_log("RDCR");
      bcopy(data, &kcr, sizeof(cr_msg_t));
      pmem_log("ReadCR%d()", kcr.register_number);
      switch (kcr.register_number) {
        case 0: kcr.value = ReadCR0(); break;
        case 2: kcr.value = ReadCR2(); break;
        case 3: kcr.value = ReadCR3(); break;
        case 4: kcr.value = ReadCR4(); break;
        case 8: kcr.value = ReadCR8(); break;
        default:
          pmem_error("Incorrect CR number");
          break;
      }
      bcopy(&kcr, data, sizeof(cr_msg_t));
      break;

    case CHIPSEC_IOC_WRCR:
      pmem_log("WRCR");
      bcopy(data, &kcr, sizeof(cr_msg_t));
      pmem_log("WriteCR%d(%x)", kcr.register_number, kcr.value);
      switch (kcr.register_number) {
        case 0: WriteCR0(kcr.value); break;
        case 2: WriteCR2(kcr.value); break;
        case 3: WriteCR3(kcr.value); break;
        case 4: WriteCR4(kcr.value); break;
        case 8: WriteCR8(kcr.value); break;
        default:
          pmem_error("Incorrect CR number");
          break;
      }
      bcopy(&kcr, data, sizeof(cr_msg_t));
      break;

    case CHIPSEC_IOC_RDIO:
      pmem_log("RDIO");
      bcopy(data, &kio, sizeof(io_msg_t));
      pmem_log("ReadIO %i from %x", kio.size, kio.port);
      kio.value = ReadIOPort((uint32_t)kio.port, kio.size);
      bcopy(&kio, data, sizeof(io_msg_t));
      break;

    case CHIPSEC_IOC_WRIO:
      pmem_log("WRIO");
      bcopy(data, &kio, sizeof(io_msg_t));
      pmem_log("WriteIO %x to %x size %d", kio.value, kio.port, kio.size);
      WriteIOPort((uint32_t)kio.port, kio.size, (uint32_t)kio.value);
      break;

    case CHIPSEC_IOC_RDMSR:
      pmem_log("RDMSR");
      bcopy(data, &kmsr, sizeof(msr_msg_t));
      pmem_log("ReadMSR %x", kmsr.msr_num);
      ReadMSR(kmsr.msr_num, &kmsr.msr_lo, &kmsr.msr_hi);
      bcopy(&kmsr, data, sizeof(msr_msg_t));
      break;

    case CHIPSEC_IOC_WRMSR:
      pmem_log("WRMSR");
      bcopy(data, &kmsr, sizeof(msr_msg_t));
      pmem_log("WriteMSR %x with %x%x", kmsr.msr_num, kmsr.msr_hi,
               kmsr.msr_lo);
      WriteMSR(kmsr.msr_num, kmsr.msr_lo, kmsr.msr_hi);
      break;

    case CHIPSEC_IOC_CPUID:
      pmem_log("CPUID");
      bcopy(data, &kcpuid, sizeof(cpuid_msg_t));
      pmem_log("WriteMSR rax %x rcx %x", kcpuid.rax, kcpuid.rcx);
      chipCPUID(&kcpuid);
      bcopy(&kcpuid, data, sizeof(cpuid_msg_t));
      break;

    case CHIPSEC_IOC_SWSMI:
      pmem_log("SWSMI");
      bcopy(data, &kswsmi, sizeof(swsmi_msg_t));
      pmem_log("Blah");
      SWSMI(&kswsmi);
      bcopy(&kswsmi, data, sizeof(swsmi_msg_t));
      break;

    case CHIPSEC_IOC_HYPERCALL:
      pmem_log("HYPERCALL");
      bcopy(data, &khypercall, sizeof(hypercall_msg_t));
      pmem_log("Hypercall Data");
      khypercall.hypercall_page = (uint64_t) & hypercall_page;
      hypercall(khypercall.rdi, khypercall.rsi, khypercall.rdx,
                khypercall.rcx, khypercall.r8, khypercall.r9,
                khypercall.rax, khypercall.rbx, khypercall.r10,
                khypercall.r11, khypercall.xmm_buffer,
                khypercall.hypercall_page);
      bcopy(&khypercall, data, sizeof(hypercall_msg_t));
      break;

    case CHIPSEC_IOC_MSGBUS_SEND_MESSAGE:
      pmem_log("MSGBUG SEND MESSAGE");
      bcopy(data, &kmsgbus, sizeof(msgbus_msg_t));
      pmem_log("MSGBUS DATA:");
      if (kmsgbus.direction & MSGBUS_MDR_IN_MASK) {
        //Write data to MDR register
        WritePCICfg(MSGBUS_BUS, MSGBUS_DEV, MSGBUS_FUN, MDR, 4,
                    (uint32_t)kmsgbus.mdr);
      }
      //TODO investigate comment (from linux driver)
      //Write extended address to MCRX register if address is > 0xff
      if (kmsgbus.mcrx != 0) {
        WritePCICfg(MSGBUS_BUS, MSGBUS_DEV, MSGBUS_FUN, MCRX, 4,
                    (uint32_t)kmsgbus.mcrx);
      }
      //Write to MCR register to send the message on the message bus
      WritePCICfg(MSGBUS_BUS, MSGBUS_DEV, MSGBUS_FUN, MCR, 4,
                  (uint32_t)kmsgbus.mcr);
      if (kmsgbus.direction & MSGBUS_MDR_OUT_MASK) {
        //Read data from MDR register
        kmsgbus.mdr_out = ReadPCICfg(MSGBUS_BUS, MSGBUS_DEV, MSGBUS_FUN,
                                     MDR, 4);
      }
      bcopy(&kmsgbus, data, sizeof(msgbus_msg_t));
      break;

    case CHIPSEC_IOC_CPU_DESCRIPTOR_TABLE: {
      descriptor_table_record kdtr;
      IOMemoryDescriptor *io_desc;
      IOMemoryMap *io_map;
      pmem_log("GET CPU DESCRIPTOR TABLE");
      bcopy(data, &kcpudes, sizeof(cpudes_msg_t));
      pmem_log("GET_CPU_DESCRIPTOR TABLE %x thread %d",
               kcpudes.des_table_code, kcpudes.cpu_thread_id);
      switch (kcpudes.des_table_code) {
        case CPU_DT_CODE_GDTR:
          store_gdtr(&kdtr);
          break;
        case CPU_DT_CODE_LDTR:
          store_ldtr(&kdtr);
          break;
        case CPU_DT_CODE_IDTR:
          store_idtr(&kdtr);
          break;
        default:
          // BUGFIX: kdtr was previously used uninitialized for unknown
          // table codes.
          pmem_error("Unknown descriptor table code");
          return EFAULT;
      }
      // BUGFIX: the xlate_pa_va() result was ignored and io_map
      // dereferenced unconditionally (potential NULL/garbage deref).
      if (xlate_pa_va(kdtr.base, &io_desc, &io_map) || io_map == NULL) {
        pmem_log("Could not map memory");
        return EFAULT;
      }
      kcpudes.limit = kdtr.limit;
      kcpudes.base_hi = (kdtr.base >> 32);
      kcpudes.base_lo = (kdtr.base & 0xFFFFFFFF);
      kcpudes.pa_hi = (io_map->getPhysicalAddress() >> 32);
      kcpudes.pa_lo = (io_map->getPhysicalAddress() & 0xFFFFFFFF);
      bcopy(&kcpudes, data, sizeof(cpudes_msg_t));
      break;
    }

    case CHIPSEC_IOC_ALLOC_PHYSMEM: {
      void *va;
      IOMemoryDescriptor *io_desc1;
      IOMemoryMap *io_map1;
      pmem_log("ALLOC PHYSMEM");
      bcopy(data, &kalloc_pmem, sizeof(alloc_pmem_msg_t));
      pmem_log("Allocating %x memory, with pa limit of %x",
               kalloc_pmem.num_bytes, kalloc_pmem.max_addr);
      // NOTE(review): on success this allocation is intentionally not freed
      // here — its address is handed to userspace. Confirm a matching free
      // path exists elsewhere.
      va = IOMalloc((uint32_t)kalloc_pmem.num_bytes);
      if (!va) {
        pmem_log("Could not allocate memory");
        // BUGFIX: was -EFAULT; BSD ioctl errors are positive errno values
        // (matches the other driver variant's convention).
        return EFAULT;
      }
      memset(va, 0, kalloc_pmem.num_bytes);
      // BUGFIX: on mapping failure the code previously fell through and
      // dereferenced io_map1 anyway, and leaked the allocation.
      if (xlate_pa_va((addr64_t) va, &io_desc1, &io_map1) ||
          io_map1 == NULL) {
        pmem_log("Could not map memory");
        IOFree(va, (uint32_t)kalloc_pmem.num_bytes);
        return EFAULT;
      }
      if (io_map1->getPhysicalAddress() > kalloc_pmem.max_addr) {
        pmem_log("Allocate memory is above max_pa");
      }
      kalloc_pmem.virt_addr = (uint64_t)va;
      kalloc_pmem.phys_addr = io_map1->getPhysicalAddress();
      bcopy(&kalloc_pmem, data, sizeof(alloc_pmem_msg_t));
      break;
    }

    default:
      pmem_error("Illegal ioctl %08lx", cmd);
      // BUGFIX: was -EFAULT (see above).
      return EFAULT;
  }
  return KERN_SUCCESS;
}