/* Given an instruction corresponding to a call, validate that the generated
 * return address is safe.
 * Parameters:
 *   vstate - The state of the validator.
 */
static void NaClValidateCallAlignment(NaClValidatorState* vstate) {
  /* The return is safe only if it begins at an aligned address (since
   * return instructions are not explicit jumps). */
  NaClInstState* call_state = vstate->cur_inst_state;
  NaClPcAddress return_addr =
      call_state->inst_addr + NaClInstStateLength(call_state);
  if ((return_addr & vstate->bundle_mask) == 0) {
    /* Return address falls on a bundle boundary: nothing to report. */
    return;
  }
  {
    NaClPcAddress printable_return_addr =
        NaClInstStatePrintableAddress(call_state) +
        NaClInstStateLength(call_state);
    /* NOTE: Previously the validator recorded an error for call instructions
     * that were not aligned against the end of a bundle, as these, while
     * safe, are not correct with the current code generation idioms.
     * This #if defined(ERROR_ON_CALL_BUNDLE_ALIGNMENT) was added to allow
     * experimentation with different call/return idioms.
     */
    if (!NACL_FLAGS_unsafe_single_inst_mode) {
      NaClValidatorInstMessage(
#if defined(ERROR_ON_CALL_BUNDLE_ALIGNMENT)
          LOG_ERROR,
#else
          LOG_WARNING,
#endif
          vstate, call_state,
          "Bad call alignment, return pc = %"NACL_PRIxNaClPcAddress"\n",
          printable_return_addr);
    }
  }
}
/* Prints one decoded instruction: its printable address, the raw bytes
 * (padded to the maximum x86 instruction width so columns align), and the
 * disassembled text.
 * Parameters:
 *   file  - The Gio stream to print to.
 *   state - The decoded instruction state to print.
 */
void NaClInstStateInstPrint(struct Gio* file, NaClInstState* state) {
  int byte_index;
  const NaClInst* inst;
  int length = NaClInstStateLength(state);
  /* Dump decoder internals only in debug builds. */
  DEBUG_OR_ERASE(
      NaClInstPrint(file, state->decoder_tables, NaClInstStateInst(state)));
  DEBUG(NaClExpVectorPrint(file, state));
  /* Address column, then one fixed-width cell per possible byte. */
  gprintf(file, "%"NACL_PRIxNaClPcAddressAll": ",
          NaClInstStatePrintableAddress(state));
  for (byte_index = 0; byte_index < NACL_MAX_BYTES_PER_X86_INSTRUCTION;
       ++byte_index) {
    if (byte_index < length) {
      gprintf(file, "%02"NACL_PRIx8" ", NaClInstStateByte(state, byte_index));
    } else {
      gprintf(file, "   ");
    }
  }
  /* Print out the assembly instruction it disassembles to. */
  inst = NaClInstStateInst(state);
  NaClPrintDisassembled(file, state, inst);
  gprintf(file, "\n");
}
/* Returns the number of bytes in the given decoded instruction.
 * Thin wrapper over the instruction-state length accessor. */
uint8_t NaClInstLength(NaClInstStruct *inst) {
  const uint8_t length = NaClInstStateLength(inst);
  return length;
}
void NaClCpuCheck(struct NaClValidatorState* state, struct NaClInstIter* iter) { Bool squash_me = FALSE; switch (state->cur_inst->insttype) { case NACLi_X87: case NACLi_X87_FSINCOS: NaClCheckFeature(NaClCPUFeatureX86_x87, state, &squash_me); break; case NACLi_SFENCE_CLFLUSH: /* TODO(bradchen): distinguish between SFENCE and CLFLUSH */ NaClCheckFeature(NaClCPUFeatureX86_CLFLUSH, state, &squash_me); NaClCheckFeature(NaClCPUFeatureX86_FXSR, state, &squash_me); break; case NACLi_CMPXCHG8B: NaClCheckFeature(NaClCPUFeatureX86_CX8, state, &squash_me); break; case NACLi_CMPXCHG16B: NaClCheckFeature(NaClCPUFeatureX86_CX16, state, &squash_me); break; case NACLi_CMOV: NaClCheckFeature(NaClCPUFeatureX86_CMOV, state, &squash_me); break; case NACLi_FCMOV: if (!(NaClGetCPUFeatureX86(&state->cpu_features, NaClCPUFeatureX86_CMOV) && NaClGetCPUFeatureX86(&state->cpu_features, NaClCPUFeatureX86_x87))) { if (!state->cpu_checks.f_CMOV_and_x87) { NaClValidatorInstMessage( LOG_WARNING, state, state->cur_inst_state, "CPU model does not support CMOV and x87 instructions.\n"); state->cpu_checks.f_CMOV_and_x87 = TRUE; } squash_me = TRUE; } break; case NACLi_RDTSC: NaClCheckFeature(NaClCPUFeatureX86_TSC, state, &squash_me); break; case NACLi_MMX: NaClCheckFeature(NaClCPUFeatureX86_MMX, state, &squash_me); break; case NACLi_MMXSSE2: /* Note: We accept these instructions if either MMX or SSE2 bits */ /* are set, in case MMX instructions go away someday... 
*/ if (!(NaClGetCPUFeatureX86(&state->cpu_features, NaClCPUFeatureX86_MMX) || NaClGetCPUFeatureX86(&state->cpu_features, NaClCPUFeatureX86_SSE2))) { if (!state->cpu_checks.f_MMX_or_SSE2) { NaClValidatorInstMessage( LOG_WARNING, state, state->cur_inst_state, "CPU model does not support MMX or SSE2 instructions.\n"); state->cpu_checks.f_MMX_or_SSE2 = TRUE; } } squash_me = TRUE; break; case NACLi_SSE: NaClCheckFeature(NaClCPUFeatureX86_SSE, state, &squash_me); break; case NACLi_SSE2: NaClCheckFeature(NaClCPUFeatureX86_SSE2, state, &squash_me); break; case NACLi_SSE3: NaClCheckFeature(NaClCPUFeatureX86_SSE3, state, &squash_me); break; case NACLi_SSE4A: NaClCheckFeature(NaClCPUFeatureX86_SSE4A, state, &squash_me); break; case NACLi_SSE41: NaClCheckFeature(NaClCPUFeatureX86_SSE41, state, &squash_me); break; case NACLi_SSE42: NaClCheckFeature(NaClCPUFeatureX86_SSE42, state, &squash_me); break; case NACLi_MOVBE: NaClCheckFeature(NaClCPUFeatureX86_MOVBE, state, &squash_me); break; case NACLi_POPCNT: NaClCheckFeature(NaClCPUFeatureX86_POPCNT, state, &squash_me); break; case NACLi_LZCNT: NaClCheckFeature(NaClCPUFeatureX86_LZCNT, state, &squash_me); break; case NACLi_SSSE3: NaClCheckFeature(NaClCPUFeatureX86_SSSE3, state, &squash_me); break; case NACLi_3DNOW: NaClCheckFeature(NaClCPUFeatureX86_3DNOW, state, &squash_me); break; case NACLi_E3DNOW: NaClCheckFeature(NaClCPUFeatureX86_E3DNOW, state, &squash_me); break; case NACLi_LONGMODE: /* TODO(karl): Remove this when NACLi_LONGMODE is no longer needed */ NaClCheckFeature(NaClCPUFeatureX86_LM, state, &squash_me); break; case NACLi_SSE2x: /* This case requires CPUID checking code */ /* DATA16 prefix required */ if (!(state->cur_inst_state->prefix_mask & kPrefixDATA16)) { NaClValidatorInstMessage( LOG_ERROR, state, state->cur_inst_state, "SSEx instruction must use prefix 0x66.\n"); } NaClCheckFeature(NaClCPUFeatureX86_SSE2, state, &squash_me); break; default: /* This instruction could be either legal or illegal, but if we * get 
here it is not CPU-dependent. */ break; } if (state->cur_inst->flags & NACL_IFLAG(LongMode)) { NaClCheckFeature(NaClCPUFeatureX86_LM, state, &squash_me); } if (squash_me) { if (state->readonly_text) { NaClValidatorInstMessage( LOG_ERROR, state, state->cur_inst_state, "Read-only text: cannot squash unsupported instruction.\n"); } else { /* Replace all bytes of the instruction with the HLT instruction. */ NCStubOutMem(state, NaClInstIterGetInstMemoryInline(iter), NaClInstStateLength(state->cur_inst_state)); } } }