static Bool SerializeAllProcessors(void) {
  /*
   * We rely on the OS mprotect() call to issue interprocessor interrupts,
   * which will cause other processors to execute an IRET, which is
   * serializing.
   *
   * This code is based on two main considerations:
   * 1. Only switching the page from exec to non-exec state is guaranteed
   *    to invalidate processors' instruction caches.
   * 2. It's bad to have a page that is both writeable and executable,
   *    even if it is never both at the same time.
   */
  int size = NACL_MAP_PAGESIZE;

  if (NULL == g_squashybuffer) {
    if ((0 != NaClPageAlloc(&g_squashybuffer, size)) ||
        (0 != NaClMprotect(g_squashybuffer, size, PROT_READ | PROT_WRITE))) {
      NaClLog(0,
              ("SerializeAllProcessors: initial squashybuffer allocation"
               " failed\n"));
      return FALSE;
    }
    NaClFillMemoryRegionWithHalt(g_squashybuffer, size);
    g_firstbyte = *(char *) g_squashybuffer;
    NaClLog(0, "SerializeAllProcessors: g_firstbyte is %d\n", g_firstbyte);
  }

  if ((0 != NaClMprotect(g_squashybuffer, size, PROT_READ | PROT_EXEC))) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (1)\n"));
    return FALSE;
  }
  /*
   * Perform a read to ensure that potential kernel laziness does not
   * defeat this hack.
   */
  if (*(char *) g_squashybuffer != g_firstbyte) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (2)\n"));
    NaClLog(0, "SerializeAllProcessors: g_firstbyte is %d\n", g_firstbyte);
    NaClLog(0, "SerializeAllProcessors: *g_squashybuffer is %d\n",
            *(char *) g_squashybuffer);
    return FALSE;
  }
  /*
   * We would like to set the protection to PROT_NONE, but on Windows
   * there's an ugly hack in NaClMprotect where PROT_NONE can result
   * in MEM_DECOMMIT, causing the contents of the page(s) to be lost!
   */
  if (0 != NaClMprotect(g_squashybuffer, size, PROT_READ)) {
    NaClLog(0,
            ("SerializeAllProcessors: interprocessor interrupt"
             " generation failed: could not reverse shield polarity (3)\n"));
    return FALSE;
  }
  return TRUE;
}
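/*
 * Illustrative sketch only, not part of the original file: the same
 * non-exec -> exec -> non-exec protection toggle that
 * SerializeAllProcessors() performs via NaClMprotect(), shown in isolation
 * against plain POSIX mprotect().  The function name toggle_exec_sketch()
 * is hypothetical; the point is that the exec -> non-exec transition is
 * the one the comment above relies on to invalidate other processors'
 * instruction caches.
 */
#include <stddef.h>
#include <sys/mman.h>

static int toggle_exec_sketch(void *page, size_t size) {
  /* Make the page executable (mirrors the PROT_READ|PROT_EXEC step). */
  if (mprotect(page, size, PROT_READ | PROT_EXEC) != 0)
    return -1;
  /*
   * Drop the execute permission again: this exec -> non-exec switch is
   * what forces the kernel to interrupt the other processors.
   */
  if (mprotect(page, size, PROT_READ) != 0)
    return -1;
  return 0;
}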
static void MakeDynamicCodePagesVisible(struct NaClApp *nap,
                                        uint32_t page_index_min,
                                        uint32_t page_index_max,
                                        uint8_t *writable_addr) {
  void *user_addr;
  uint32_t index;
  size_t size = (page_index_max - page_index_min) * NACL_MAP_PAGESIZE;

  for (index = page_index_min; index < page_index_max; index++) {
    CHECK(!BitmapIsBitSet(nap->dynamic_page_bitmap, index));
    BitmapSetBit(nap->dynamic_page_bitmap, index);
  }
  user_addr = (void *) NaClUserToSys(nap,
                                     nap->dynamic_text_start
                                     + page_index_min * NACL_MAP_PAGESIZE);

#if NACL_WINDOWS
  NaClUntrustedThreadsSuspendAll(nap, /* save_registers= */ 0);

  /*
   * The VirtualAlloc() call here has two effects:
   *
   *  1) It commits the page in the shared memory (SHM) object,
   *     allocating swap space and making the page accessible.  This
   *     affects our writable mapping of the shared memory object too.
   *     Before the VirtualAlloc() call, dereferencing writable_addr
   *     would fault.
   *  2) It changes the page permissions of the mapping to
   *     read+execute.  Since this exposes the page in its unsafe,
   *     non-HLT-filled state, this must be done with untrusted
   *     threads suspended.
   */
  {
    uintptr_t offset;
    for (offset = 0; offset < size; offset += NACL_MAP_PAGESIZE) {
      void *user_page_addr = (char *) user_addr + offset;
      if (VirtualAlloc(user_page_addr, NACL_MAP_PAGESIZE,
                       MEM_COMMIT, PAGE_EXECUTE_READ) != user_page_addr) {
        NaClLog(LOG_FATAL, "MakeDynamicCodePagesVisible: "
                "VirtualAlloc() failed -- probably out of swap space\n");
      }
    }
  }
#endif

  /* Sanity check:  Ensure the page is not already in use. */
  CHECK(*writable_addr == 0);

  NaClFillMemoryRegionWithHalt(writable_addr, size);

#if NACL_WINDOWS
  NaClUntrustedThreadsResumeAll(nap);
#else
  if (NaClMprotect(user_addr, size, PROT_READ | PROT_EXEC) != 0) {
    NaClLog(LOG_FATAL,
            "MakeDynamicCodePagesVisible: NaClMprotect() failed\n");
  }
#endif
}
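/*
 * Sketch only: BitmapIsBitSet() and BitmapSetBit() are defined elsewhere
 * in the tree.  The SketchBitmap* helpers below are a hypothetical
 * word-per-32-pages implementation, included just to make the
 * dynamic_page_bitmap bookkeeping above concrete; the real helpers may
 * differ in layout and naming.
 */
#include <stdint.h>

static int SketchBitmapIsBitSet(const uint32_t *bitmap, uint32_t index) {
  /* Each 32-bit word tracks 32 consecutive dynamic code pages. */
  return (bitmap[index / 32] >> (index % 32)) & 1u;
}

static void SketchBitmapSetBit(uint32_t *bitmap, uint32_t index) {
  /* Mark the page as committed/visible so it is never made visible twice. */
  bitmap[index / 32] |= 1u << (index % 32);
}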
/*
 * Fill from static_text_end to end of that page with halt
 * instruction, which is at least NACL_HALT_LEN in size when no
 * dynamic text is present.  Does not touch dynamic text region, which
 * should be pre-filled with HLTs.
 *
 * By adding NACL_HALT_SLED_SIZE, we ensure that the code region ends
 * with HLTs, just in case the CPU has a bug in which it fails to
 * check for running off the end of the x86 code segment.
 */
void NaClFillEndOfTextRegion(struct NaClApp *nap) {
  size_t page_pad;

  /*
   * NOTE: make sure we are not silently overwriting data.  It is the
   * toolchain's responsibility to ensure that a NACL_HALT_SLED_SIZE
   * gap exists.
   */
  if (0 != nap->data_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE >
          NaClTruncAllocPage(nap->data_start)) {
    NaClLog(LOG_FATAL, "Missing gap between text and data for halt_sled\n");
  }
  if (0 != nap->rodata_start &&
      nap->static_text_end + NACL_HALT_SLED_SIZE > nap->rodata_start) {
    NaClLog(LOG_FATAL, "Missing gap between text and rodata for halt_sled\n");
  }

  if (NULL == nap->text_shm) {
    /*
     * No dynamic text exists.  Space for NACL_HALT_SLED_SIZE must
     * exist.
     */
    page_pad = (NaClRoundAllocPage(nap->static_text_end
                                   + NACL_HALT_SLED_SIZE)
                - nap->static_text_end);

    CHECK(page_pad >= NACL_HALT_SLED_SIZE);
    CHECK(page_pad < NACL_MAP_PAGESIZE + NACL_HALT_SLED_SIZE);
  } else {
    /*
     * Dynamic text exists; the halt sled resides in the dynamic text
     * region, so all we need to do here is to round out the last
     * static text page with HLT instructions.  It doesn't matter if
     * the size of this region is smaller than NACL_HALT_SLED_SIZE --
     * this is just to fully initialize the page, rather than (later)
     * decoding/validating zero-filled memory as instructions.
     */
    page_pad = NaClRoundAllocPage(nap->static_text_end)
        - nap->static_text_end;
  }

  NaClLog(4,
          "Filling with halts: %08"NACL_PRIxPTR", %08"NACL_PRIxS" bytes\n",
          nap->mem_start + nap->static_text_end,
          page_pad);

  NaClFillMemoryRegionWithHalt((void *) (nap->mem_start
                                         + nap->static_text_end),
                               page_pad);

  nap->static_text_end += page_pad;
}
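/*
 * Sketch only: NaClRoundAllocPage() and NaClTruncAllocPage() are defined
 * elsewhere in the tree.  The Sketch* versions below show the intended
 * arithmetic -- rounding an address up or down to an allocation-page
 * boundary -- under the assumption that NACL_MAP_PAGESIZE is a power of
 * two; the real macros may be written differently.
 */
#include <stdint.h>

static uintptr_t SketchRoundAllocPage(uintptr_t addr) {
  /* Round up to the next NACL_MAP_PAGESIZE boundary (no-op if aligned). */
  return (addr + NACL_MAP_PAGESIZE - 1)
      & ~((uintptr_t) NACL_MAP_PAGESIZE - 1);
}

static uintptr_t SketchTruncAllocPage(uintptr_t addr) {
  /* Round down to the enclosing NACL_MAP_PAGESIZE boundary. */
  return addr & ~((uintptr_t) NACL_MAP_PAGESIZE - 1);
}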
int NaClPatchWindowsExceptionDispatcherWithCheck(void) {
  uint8_t *ntdll_routine = NaClGetKiUserExceptionDispatcher();
  uint8_t *intercept_code;
  uint8_t *dest;
  uint32_t *reloc_addr;
  size_t template_size = (NaCl_exception_dispatcher_intercept_end -
                          NaCl_exception_dispatcher_intercept);
  DWORD old_prot;
  /*
   * We patch the start of KiUserExceptionDispatcher with a 32-bit
   * relative jump to our routine, so our routine needs to be
   * relocated to within +/-2GB of KiUserExceptionDispatcher's
   * address.  To avoid edge cases, we allocate within the smaller
   * range of +/-1.5GB.
   *
   * We use a 32-bit relative jump because it's short -- 5 bytes --
   * which minimises the amount of code we have to overwrite and
   * relocate.  If we did a 64-bit jump, we'd need to do something
   * like the following:
   *   50                            push %rax
   *   48 b8 XX XX XX XX XX XX XX XX mov $0xXXXXXXXXXXXXXXXX, %rax
   *   ff e0                         jmpq *%rax
   * which is 13 bytes.
   */
  const size_t kMaxDistance = 1536 << 20;
  /*
   * Check for the instructions:
   *   fc                   cld
   *   48 8b 05 XX XX XX XX mov XXXXXXXX(%rip), %rax
   *
   * Note that if you set a breakpoint on KiUserExceptionDispatcher
   * with WinDbg, this check will fail because the first byte will
   * have been replaced with int3.
   */
  size_t bytes_to_move = 8;
  size_t rip_relative_operand_offset = 4;
  if (ntdll_routine[0] == 0xfc &&
      ntdll_routine[1] == 0x48 &&
      ntdll_routine[2] == 0x8b &&
      ntdll_routine[3] == 0x05) {
    NaClLog(2, "NaClPatchWindowsExceptionDispatcherWithCheck: "
            "Got instructions expected for Windows Vista and Windows 7\n");
  } else {
    NaClLog(LOG_ERROR, "NaClPatchWindowsExceptionDispatcherWithCheck: "
            "Unexpected start instructions\n");
    return 0;  /* Failure */
  }

  intercept_code = AllocatePageInRange(ntdll_routine - kMaxDistance,
                                       ntdll_routine + kMaxDistance);
  if (intercept_code == NULL) {
    NaClLog(LOG_FATAL, "NaClPatchWindowsExceptionDispatcherWithCheck: "
            "AllocatePageInRange() failed\n");
  }
  /* Fill the page with HLTs just in case. */
  NaClFillMemoryRegionWithHalt(intercept_code, NACL_MAP_PAGESIZE);
  dest = intercept_code;

  /* Copy template code and fill out a parameter in it. */
  memcpy(dest, NaCl_exception_dispatcher_intercept, template_size);
  reloc_addr = (uint32_t *) (dest +
                             (NaCl_exception_dispatcher_intercept_tls_index -
                              NaCl_exception_dispatcher_intercept) + 1);
  CHECK(*reloc_addr == 0x12345678);
  *reloc_addr = _tls_index;
  dest += template_size;

  /*
   * Copy and relocate instructions from KiUserExceptionDispatcher
   * that we will be overwriting.
   */
  memcpy(dest, ntdll_routine, bytes_to_move);
  RelocateRipRelative(dest, ntdll_routine, rip_relative_operand_offset);
  dest += bytes_to_move;

  /*
   * Lastly, write a jump that returns to the unmodified portion of
   * KiUserExceptionDispatcher.
   */
  WriteJump32(dest, ntdll_routine + bytes_to_move);

  if (!VirtualProtect(intercept_code, NACL_MAP_PAGESIZE,
                      PAGE_EXECUTE_READ, &old_prot)) {
    NaClLog(LOG_FATAL,
            "NaClPatchWindowsExceptionDispatcherWithCheck: VirtualProtect() "
            "failed to make our intercept routine executable\n");
  }

  if (!VirtualProtect(ntdll_routine, kJump32Size + 1,
                      PAGE_EXECUTE_READWRITE, &old_prot)) {
    NaClLog(LOG_FATAL, "NaClPatchWindowsExceptionDispatcherWithCheck: "
            "VirtualProtect() failed to make the routine writable\n");
  }
  /*
   * We write the jump after the "cld" instruction so that, if you
   * want to set a breakpoint on KiUserExceptionDispatcher, you can
   * take out the checks for "cld" and this will still work.
   */
  CHECK(ntdll_routine[0] == 0xfc);  /* "cld" instruction */
  WriteJump32(ntdll_routine + 1, intercept_code);
  if (!VirtualProtect(ntdll_routine, kJump32Size + 1, old_prot, &old_prot)) {
    NaClLog(LOG_FATAL, "NaClPatchWindowsExceptionDispatcherWithCheck: "
            "VirtualProtect() failed to restore page permissions\n");
  }
  return 1;  /* Success */
}
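/*
 * Sketch only: WriteJump32() is defined elsewhere in the tree.  This shows
 * the encoding such a helper has to produce: an x86 "jmp rel32" is the
 * opcode 0xe9 followed by a 32-bit displacement measured from the end of
 * the 5-byte instruction, which is why the target must be within +/-2GB
 * and why the patched region is kJump32Size (5) bytes plus the leading
 * "cld".  SketchWriteJump32 is a hypothetical name.
 */
#include <stdint.h>
#include <string.h>

static void SketchWriteJump32(uint8_t *jump_site, uint8_t *target) {
  /* Displacement is relative to the instruction *after* the jump. */
  int32_t rel32 = (int32_t) (target - (jump_site + 5));
  jump_site[0] = 0xe9;  /* jmp rel32 */
  memcpy(jump_site + 1, &rel32, sizeof(rel32));
}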
void NaClFillTrampolineRegion(struct NaClApp *nap) {
  NaClFillMemoryRegionWithHalt(
      (void *) (nap->mem_start + NACL_TRAMPOLINE_START),
      NACL_TRAMPOLINE_SIZE);
}
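/*
 * Sketch only: NaClFillMemoryRegionWithHalt() is architecture-specific and
 * defined elsewhere.  On x86 the halt instruction is the single byte 0xf4
 * (HLT), so an x86-only fill reduces to a memset; the length check mirrors
 * the invariant a real implementation must keep when the halt instruction
 * is wider than one byte.  The SKETCH_* names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

#define SKETCH_HALT_OPCODE 0xf4  /* x86 HLT */
#define SKETCH_HALT_LEN    1     /* HLT is one byte on x86 */

static void SketchFillMemoryRegionWithHalt(void *start, size_t size) {
  /* The region must hold a whole number of halt instructions. */
  assert(size % SKETCH_HALT_LEN == 0);
  memset(start, SKETCH_HALT_OPCODE, size);
}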
int NaClMakeDispatchThunk(struct NaClApp *nap) {
  int retval = 0;  /* fail */
  int error;
  void *thunk_addr = NULL;
  struct NaClPatchInfo patch_info;
  struct NaClPatch jmp_target;

  NaClLog(LOG_WARNING, "Entered NaClMakeDispatchThunk\n");
  if (0 != nap->dispatch_thunk) {
    NaClLog(LOG_ERROR, " dispatch_thunk already initialized!\n");
    return 1;
  }

  if (0 != (error = NaCl_page_alloc_randomized(&thunk_addr,
                                               NACL_MAP_PAGESIZE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_page_alloc_randomized failed,"
            " errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  NaClLog(LOG_INFO, "NaClMakeDispatchThunk: got addr 0x%"NACL_PRIxPTR"\n",
          (uintptr_t) thunk_addr);

  if (0 != (error = NaCl_mprotect(thunk_addr,
                                  NACL_MAP_PAGESIZE,
                                  PROT_READ | PROT_WRITE))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_mprotect r/w failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  NaClFillMemoryRegionWithHalt(thunk_addr, NACL_MAP_PAGESIZE);

  jmp_target.target = (((uintptr_t) &NaClDispatchThunk_jmp_target)
                       - sizeof(uintptr_t));
  jmp_target.value = (uintptr_t) NaClSyscallSeg;

  NaClPatchInfoCtor(&patch_info);
  patch_info.abs64 = &jmp_target;
  patch_info.num_abs64 = 1;

  patch_info.dst = (uintptr_t) thunk_addr;
  patch_info.src = (uintptr_t) &NaClDispatchThunk;
  patch_info.nbytes = ((uintptr_t) &NaClDispatchThunkEnd
                       - (uintptr_t) &NaClDispatchThunk);

  NaClApplyPatchToMemory(&patch_info);

  if (0 != (error = NaCl_mprotect(thunk_addr,
                                  NACL_MAP_PAGESIZE,
                                  PROT_EXEC | PROT_READ))) {
    NaClLog(LOG_INFO,
            "NaClMakeDispatchThunk::NaCl_mprotect r/x failed, errno %d\n",
            -error);
    retval = 0;
    goto cleanup;
  }
  retval = 1;

 cleanup:
  if (0 == retval) {
    if (NULL != thunk_addr) {
      NaCl_page_free(thunk_addr, NACL_MAP_PAGESIZE);
      thunk_addr = NULL;
    }
  } else {
    nap->dispatch_thunk = (uintptr_t) thunk_addr;
  }
  return retval;
}
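/*
 * Sketch only, and a simplification: NaClApplyPatchToMemory() and the
 * NaClPatchInfo/NaClPatch structures are defined elsewhere.  Assuming the
 * abs64 patch semantics suggested by the code above -- copy the thunk
 * template from src to dst, then overwrite the 64-bit immediate whose
 * location in the template is recorded in jmp_target.target with
 * jmp_target.value (the address of NaClSyscallSeg) -- a single-patch
 * version might look like this.  SketchApplyAbs64Patch is a hypothetical
 * name and ignores the other patch kinds the real routine supports.
 */
#include <stdint.h>
#include <string.h>

static void SketchApplyAbs64Patch(uintptr_t dst, uintptr_t src,
                                  size_t nbytes,
                                  uintptr_t patch_target, uint64_t value) {
  /* Copy the whole thunk template to its freshly allocated page. */
  memcpy((void *) dst, (void *) src, nbytes);
  /* Rewrite the 64-bit immediate at the same offset within the copy. */
  memcpy((void *) (dst + (patch_target - src)), &value, sizeof(value));
}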