Code example #1
void X86::X86MCNaClExpander::expandIndirectBranch(const MCInst &Inst,
                                                  MCStreamer &Out,
                                                  const MCSubtargetInfo &STI) {
  bool ThroughMemory = false, isCall = false;
  switch (Inst.getOpcode()) {
  case X86::CALL16m:
  case X86::CALL32m:
    ThroughMemory = true;
    // fall through
  case X86::CALL16r:
  case X86::CALL32r:
    isCall = true;
    break;
  case X86::JMP16m:
  case X86::JMP32m:
    ThroughMemory = true;
    // fall through
  case X86::JMP16r:
  case X86::JMP32r:
    break;
  default:
    llvm_unreachable("invalid indirect jmp/call");
  }

  MCOperand Target;
  if (ThroughMemory) {
    if (numScratchRegs() == 0) {
      Error(Inst, "No scratch registers specified");
      exit(1);
    }

    Target = MCOperand::CreateReg(getReg32(getScratchReg(0)));

    MCInst Mov;
    Mov.setOpcode(X86::MOV32rm);
    Mov.addOperand(Target);
    Mov.addOperand(Inst.getOperand(0)); // Base
    Mov.addOperand(Inst.getOperand(1)); // Scale
    Mov.addOperand(Inst.getOperand(2)); // Index
    Mov.addOperand(Inst.getOperand(3)); // Offset
    Mov.addOperand(Inst.getOperand(4)); // Segment
    Out.EmitInstruction(Mov, STI);
  } else {
    Target = MCOperand::CreateReg(getReg32(Inst.getOperand(0).getReg()));
  }

  Out.EmitBundleLock(isCall);

  MCInst And;
  And.setOpcode(X86::AND32ri8);
  And.addOperand(Target);
  And.addOperand(Target);
  And.addOperand(MCOperand::CreateImm(-kBundleSize));
  Out.EmitInstruction(And, STI);

  MCInst Branch;
  Branch.setOpcode(isCall ? X86::CALL32r : X86::JMP32r);
  Branch.addOperand(Target);
  Out.EmitInstruction(Branch, STI);

  Out.EmitBundleUnlock();
}
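
A note on the masking step in example #1: AND-ing the target register with -kBundleSize clears the low bits of the address, so the indirect branch can only land on a bundle boundary. A minimal standalone sketch of that arithmetic, assuming a 32-byte bundle (the value 32 and the main() harness below are illustrative, not taken from the listing):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBundleSize = 32;           // assumed NaCl bundle size (a power of two)
  uint32_t Target = 0x10000013;              // arbitrary, unaligned branch target
  uint32_t Masked = Target & -kBundleSize;   // same effect as the AND32ri8 with -kBundleSize
  assert(Masked % kBundleSize == 0);         // the target now starts a bundle
  assert(Masked == 0x10000000);
  return 0;
}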
Code example #2
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitREST(const MCInst &Inst, unsigned Reg32,
                     bool IsMem, MCStreamer &Out) {
  unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);
  Out.EmitBundleLock(false);
  if (!IsMem) {
    EmitMoveRegReg(false, Reg32, Inst.getOperand(0).getReg(), Out);
  } else {
    unsigned IndexOpPosition;
    MCInst SandboxedInst = Inst;
    if (SandboxMemoryRef(&SandboxedInst, &IndexOpPosition)) {
      HandleMemoryRefTruncation(&SandboxedInst, IndexOpPosition, Out);
      ShortenMemoryRef(&SandboxedInst, IndexOpPosition);
    }
    EmitLoad(false,
             Reg32,
             SandboxedInst.getOperand(0).getReg(),  // BaseReg
             SandboxedInst.getOperand(1).getImm(),  // Scale
             SandboxedInst.getOperand(2).getReg(),  // IndexReg
             SandboxedInst.getOperand(3).getImm(),  // Offset
             SandboxedInst.getOperand(4).getReg(),  // SegmentReg
             Out);
  }

  EmitRegFix(Reg64, Out);
  Out.EmitBundleUnlock();
}
Code example #3
static void EmitDirectGuardCall(int I, MCInst Saved[],
                                MCStreamer &Out) {
  // sfi_call_preamble cond=
  //   sfi_nops_to_force_slot3
  assert(I == 2 && (ARM::SFI_GUARD_CALL == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_CALL");
  Out.EmitBundleLock(true);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #4
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
                           MCStreamer &Out) {
  Out.EmitBundleLock(true);

  MCInst CALLInst;
  CALLInst.setOpcode(Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
  CALLInst.addOperand(Op);
  Out.EmitInstruction(CALLInst);
  Out.EmitBundleUnlock();
}
Code example #5
static void EmitGuardLoadOrStoreTst(int I, MCInst Saved[], MCStreamer &Out) {
  // sfi_cstore_preamble reg -->
  //   sfi_nop_if_at_bundle_end
  //   sfi_data_tst \reg
  assert(I == 2 && (ARM::SFI_GUARD_LOADSTORE_TST == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering");
  unsigned Reg = Saved[0].getOperand(0).getReg();

  Out.EmitBundleLock(false);
  EmitTST(Out, Reg);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #6
static void EmitGuardReturn(int I, MCInst Saved[], MCStreamer &Out) {
  // sfi_return_preamble reg cond=
  //    sfi_nop_if_at_bundle_end
  //    sfi_code_mask \reg \cond
  assert(I == 2 && (ARM::SFI_GUARD_RETURN == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_RETURN");
  int64_t Pred = Saved[0].getOperand(0).getImm();

  Out.EmitBundleLock(false);
  EmitBICMask(Out, ARM::LR, Pred, 0xC000000F);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #7
static void EmitGuardLoadOrStore(int I, MCInst Saved[], MCStreamer &Out) {
  // sfi_store_preamble reg cond ---->
  //    sfi_nop_if_at_bundle_end
  //    sfi_data_mask \reg, \cond
  assert(I == 2 && (ARM::SFI_GUARD_LOADSTORE == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_RETURN");
  unsigned Reg = Saved[0].getOperand(0).getReg();
  int64_t Pred = Saved[0].getOperand(2).getImm();

  Out.EmitBundleLock(false);
  EmitBICMask(Out, Reg, Pred, 0xC0000000);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #8
static void EmitIndirectGuardCall(int I, MCInst Saved[],
                                  MCStreamer &Out) {
  // sfi_indirect_call_preamble link cond=
  //   sfi_nops_to_force_slot2
  //   sfi_code_mask \link \cond
  assert(I == 2 && (ARM::SFI_GUARD_INDIRECT_CALL == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_CALL");
  unsigned Reg = Saved[0].getOperand(0).getReg();
  int64_t Pred = Saved[0].getOperand(2).getImm();
  Out.EmitBundleLock(true);
  EmitBICMask(Out, Reg, Pred, 0xC000000F);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #9
File: MipsMCNaCl.cpp  Project: 8l/emscripten-fastcomp
static void EmitIndirectGuardJmp(int I, MCInst Saved[], MCStreamer &Out) {
  //  sfi_indirect_jump_preamble link --->
  //    sfi_nop_if_at_bundle_end
  //    sfi_code_mask \link \link \maskreg
  assert(I == 2 && (Mips::SFI_GUARD_INDIRECT_JMP == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_INDIRECT_JMP");
  unsigned Addr = Saved[0].getOperand(0).getReg();
  unsigned Mask = Saved[0].getOperand(2).getReg();

  Out.EmitBundleLock(false);
  EmitMask(Out, Addr, Mask);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #10
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitSPArith(unsigned Opc, const MCOperand &ImmOp,
                        MCStreamer &Out) {
  Out.EmitBundleLock(false);

  MCInst Tmp;
  Tmp.setOpcode(Opc);
  Tmp.addOperand(MCOperand::CreateReg(X86::RSP));
  Tmp.addOperand(MCOperand::CreateReg(X86::RSP));
  Tmp.addOperand(ImmOp);
  Out.EmitInstruction(Tmp);

  EmitRegFix(X86::RSP, Out);
  Out.EmitBundleUnlock();
}
Code example #11
File: MipsMCNaCl.cpp  Project: 8l/emscripten-fastcomp
static void EmitGuardLoadOrStore(int I, MCInst Saved[], MCStreamer &Out) {
  // sfi_load_store_preamble reg --->
  //    sfi_nop_if_at_bundle_end
  //    sfi_data_mask \reg \reg \maskreg
  assert(I == 2 && (Mips::SFI_GUARD_LOADSTORE == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_LOADSTORE");
  unsigned Reg = Saved[0].getOperand(0).getReg();
  unsigned Mask = Saved[0].getOperand(2).getReg();

  Out.EmitBundleLock(false);
  EmitMask(Out, Reg, Mask);
  Out.EmitInstruction(Saved[1]);
  Out.EmitBundleUnlock();
}
Code example #12
File: MipsMCNaCl.cpp  Project: 8l/emscripten-fastcomp
// This is ONLY used for sandboxing stack changes.
// The reason why SFI_NOP_IF_AT_BUNDLE_END gets handled here is that
// it must ensure that the two instructions are in the same bundle.
// It just so happens that the SFI_NOP_IF_AT_BUNDLE_END is always
// emitted in conjunction with a SFI_DATA_MASK
//
static void EmitDataMask(int I, MCInst Saved[], MCStreamer &Out) {
  assert(I == 3 &&
         (Mips::SFI_NOP_IF_AT_BUNDLE_END == Saved[0].getOpcode()) &&
         (Mips::SFI_DATA_MASK == Saved[2].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering");

  unsigned Addr = Saved[2].getOperand(0).getReg();
  unsigned Mask = Saved[2].getOperand(2).getReg();
  assert((Mips::SP == Addr) && "Unexpected register at stack guard");

  Out.EmitBundleLock(false);
  Out.EmitInstruction(Saved[1]);
  EmitMask(Out, Addr, Mask);
  Out.EmitBundleUnlock();
}
Code example #13
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitSPAdj(const MCOperand &ImmOp, MCStreamer &Out) {
  Out.EmitBundleLock(false);

  MCInst Tmp;
  Tmp.setOpcode(X86::LEA64_32r);
  Tmp.addOperand(MCOperand::CreateReg(X86::RSP)); // DestReg
  Tmp.addOperand(MCOperand::CreateReg(X86::RBP)); // BaseReg
  Tmp.addOperand(MCOperand::CreateImm(1));        // Scale
  Tmp.addOperand(MCOperand::CreateReg(0));        // IndexReg
  Tmp.addOperand(ImmOp);                          // Offset
  Tmp.addOperand(MCOperand::CreateReg(0));        // SegmentReg
  Out.EmitInstruction(Tmp);

  EmitRegFix(X86::RSP, Out);
  Out.EmitBundleUnlock();
}
Code example #14
File: MipsMCNaCl.cpp  Project: 8l/emscripten-fastcomp
static void EmitIndirectGuardCall(int I, MCInst Saved[],
                                  MCStreamer &Out) {
  // sfi_indirect_call_preamble link --->
  //   sfi_nops_to_force_slot1
  //   sfi_code_mask \link \link \maskreg
  assert(I == 3 && (Mips::SFI_GUARD_INDIRECT_CALL == Saved[0].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering SFI_GUARD_INDIRECT_CALL");

  unsigned Addr = Saved[0].getOperand(0).getReg();
  unsigned Mask = Saved[0].getOperand(2).getReg();

  Out.EmitBundleLock(true);
  EmitMask(Out, Addr, Mask);
  Out.EmitInstruction(Saved[1]);
  Out.EmitInstruction(Saved[2]);
  Out.EmitBundleUnlock();
}
Code example #15
File: X86MCNaCl.cpp  Project: 8l/emscripten-fastcomp
static void EmitDirectCall(const MCOperand &Op, bool Is64Bit,
                           MCStreamer &Out) {
  const bool HideSandboxBase = (FlagHideSandboxBase &&
                                Is64Bit && !FlagUseZeroBasedSandbox);
  if (HideSandboxBase) {
    // For NaCl64, the sequence
    //   call target
    //   return_addr:
    // is changed to
    //   push return_addr
    //   jmp target
    //   .align 32
    //   return_addr:
    // This avoids exposing the sandbox base address via the return
    // address on the stack.

    MCContext &Context = Out.getContext();

    // Generate a label for the return address.
    MCSymbol *RetTarget = CreateTempLabel(Context, "DirectCallRetAddr");
    const MCExpr *RetTargetExpr = MCSymbolRefExpr::Create(RetTarget, Context);

    // push return_addr
    MCInst PUSHInst;
    PUSHInst.setOpcode(X86::PUSH64i32);
    PUSHInst.addOperand(MCOperand::CreateExpr(RetTargetExpr));
    Out.EmitInstruction(PUSHInst);

    // jmp target
    MCInst JMPInst;
    JMPInst.setOpcode(X86::JMP_4);
    JMPInst.addOperand(Op);
    Out.EmitInstruction(JMPInst);

    Out.EmitCodeAlignment(kNaClX86InstructionBundleSize);
    Out.EmitLabel(RetTarget);
  } else {
    Out.EmitBundleLock(true);

    MCInst CALLInst;
    CALLInst.setOpcode(Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
    CALLInst.addOperand(Op);
    Out.EmitInstruction(CALLInst);
    Out.EmitBundleUnlock();
  }
}
Code example #16
// This is ONLY used for loads into the stack pointer.
static void EmitGuardSpLoad(int I, MCInst Saved[], MCStreamer &Out) {
  assert(I == 4 &&
         (ARM::SFI_GUARD_SP_LOAD == Saved[0].getOpcode()) &&
         (ARM::SFI_NOP_IF_AT_BUNDLE_END == Saved[1].getOpcode()) &&
         (ARM::SFI_DATA_MASK == Saved[3].getOpcode()) &&
         "Unexpected SFI Pseudo while lowering");

  unsigned AddrReg = Saved[0].getOperand(0).getReg();
  unsigned SpReg = Saved[3].getOperand(0).getReg();
  int64_t  Pred = Saved[3].getOperand(2).getImm();
  assert((ARM::SP == SpReg) && "Unexpected register at stack guard");

  Out.EmitBundleLock(false);
  EmitBICMask(Out, AddrReg, Pred, 0xC0000000);
  Out.EmitInstruction(Saved[2]);
  EmitBICMask(Out, SpReg, Pred, 0xC0000000);
  Out.EmitBundleUnlock();
}
Code example #17
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
                               MCStreamer &Out) {
  const bool UseZeroBasedSandbox = FlagUseZeroBasedSandbox;
  const int JmpMask = FlagSfiX86JmpMask;
  const unsigned Reg32 = Op.getReg();
  const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);

  Out.EmitBundleLock(IsCall);

  MCInst ANDInst;
  ANDInst.setOpcode(X86::AND32ri8);
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
  Out.EmitInstruction(ANDInst);

  if (Is64Bit && !UseZeroBasedSandbox) {
    MCInst InstADD;
    InstADD.setOpcode(X86::ADD64rr);
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(X86::R15));
    Out.EmitInstruction(InstADD);
  }

  if (IsCall) {
    MCInst CALLInst;
    CALLInst.setOpcode(Is64Bit ? X86::CALL64r : X86::CALL32r);
    CALLInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(CALLInst);
  } else {
    MCInst JMPInst;
    JMPInst.setOpcode(Is64Bit ? X86::JMP64r : X86::JMP32r);
    JMPInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(JMPInst);
  }
  Out.EmitBundleUnlock();
}
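
The pair of instructions in example #17 computes the sandboxed 64-bit target: the AND bundle-aligns (and, in hardware, zero-extends) the 32-bit register, and the ADD rebases it against the sandbox base held in %r15. A small sketch of that computation with hypothetical values; the actual FlagSfiX86JmpMask setting and the runtime contents of %r15 are not known from the listing:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t JmpMask = -32;                         // assumed FlagSfiX86JmpMask (0xffffffe0)
  const uint64_t SandboxBase = 0x00007f0000000000ULL;  // hypothetical %r15 value
  uint32_t Reg32 = 0x00401017;                         // untrusted branch target (low 32 bits)
  Reg32 &= static_cast<uint32_t>(JmpMask);             // AND32ri8: bundle-align the target
  uint64_t Reg64 = SandboxBase + Reg32;                // ADD64rr with %r15: rebase into the sandbox
  assert(Reg64 % 32 == 0);                             // still bundle-aligned after rebasing
  assert(Reg64 >= SandboxBase &&
         Reg64 < SandboxBase + (1ULL << 32));          // stays inside the 4 GiB sandbox region
  return 0;
}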
Code example #18
File: X86MCNaCl.cpp  Project: sriramnrn/llvm-port
static void EmitTLSAddr32(const MCInst &Inst, MCStreamer &Out) {
  Out.EmitBundleLock(true);

  MCInst LeaInst;
  LeaInst.setOpcode(X86::LEA32r);
  LeaInst.addOperand(MCOperand::CreateReg(X86::EAX));    // DestReg
  LeaInst.addOperand(Inst.getOperand(0)); // BaseReg
  LeaInst.addOperand(Inst.getOperand(1)); // Scale
  LeaInst.addOperand(Inst.getOperand(2)); // IndexReg
  LeaInst.addOperand(Inst.getOperand(3)); // Offset
  LeaInst.addOperand(Inst.getOperand(4)); // SegmentReg
  Out.EmitInstruction(LeaInst);

  MCInst CALLInst;
  CALLInst.setOpcode(X86::CALLpcrel32);
  MCContext &context = Out.getContext();
  const MCSymbolRefExpr *expr =
    MCSymbolRefExpr::Create(
      context.GetOrCreateSymbol(StringRef("___tls_get_addr")),
      MCSymbolRefExpr::VK_PLT, context);
  CALLInst.addOperand(MCOperand::CreateExpr(expr));
  Out.EmitInstruction(CALLInst);
  Out.EmitBundleUnlock();
}
Code example #19
File: X86MCNaCl.cpp  Project: 8l/emscripten-fastcomp
static void EmitIndirectBranch(const MCOperand &Op, bool Is64Bit, bool IsCall,
                               MCStreamer &Out) {
  const bool HideSandboxBase = (FlagHideSandboxBase &&
                                Is64Bit && !FlagUseZeroBasedSandbox);
  const int JmpMask = FlagSfiX86JmpMask;
  unsigned Reg32 = Op.getReg();

  // For NaCl64, the sequence
  //   jmp *%rXX
  // is changed to
  //   mov %rXX,%r11d
  //   and $0xffffffe0,%r11d
  //   add %r15,%r11
  //   jmpq *%r11
  //
  // And the sequence
  //   call *%rXX
  //   return_addr:
  // is changed to
  //   mov %rXX,%r11d
  //   push return_addr
  //   and $0xffffffe0,%r11d
  //   add %r15,%r11
  //   jmpq *%r11
  //   .align 32
  //   return_addr:
  //
  // This avoids exposing the sandbox base address via the return
  // address on the stack.

  // For NaCl64, force an assignment of the branch target into r11,
  // and subsequently use r11 as the ultimate branch target, so that
  // only r11 (which will never be written to memory) exposes the
  // sandbox base address.  But avoid a redundant assignment if the
  // original branch target is already r11 or r11d.
  const unsigned SafeReg32 = X86::R11D;
  const unsigned SafeReg64 = X86::R11;
  if (HideSandboxBase) {
    // In some cases, EmitIndirectBranch() is called with a 32-bit
    // register Op (e.g. r11d), and in other cases a 64-bit register
    // (e.g. r11), so we need to test both variants to avoid a
    // redundant assignment.  TODO(stichnot): Make callers consistent
    // on 32 vs 64 bit register.
    if ((Reg32 != SafeReg32) && (Reg32 != SafeReg64)) {
      MCInst MOVInst;
      MOVInst.setOpcode(X86::MOV32rr);
      MOVInst.addOperand(MCOperand::CreateReg(SafeReg32));
      MOVInst.addOperand(MCOperand::CreateReg(Reg32));
      Out.EmitInstruction(MOVInst);
      Reg32 = SafeReg32;
    }
  }
  const unsigned Reg64 = getX86SubSuperRegister_(Reg32, MVT::i64);

  // Explicitly push the (32-bit) return address for a NaCl64 call
  // instruction.
  MCSymbol *RetTarget = NULL;
  if (IsCall && HideSandboxBase) {
    MCContext &Context = Out.getContext();

    // Generate a label for the return address.
    RetTarget = CreateTempLabel(Context, "IndirectCallRetAddr");
    const MCExpr *RetTargetExpr = MCSymbolRefExpr::Create(RetTarget, Context);

    // push return_addr
    MCInst PUSHInst;
    PUSHInst.setOpcode(X86::PUSH64i32);
    PUSHInst.addOperand(MCOperand::CreateExpr(RetTargetExpr));
    Out.EmitInstruction(PUSHInst);
  }

  const bool WillEmitCallInst = IsCall && !HideSandboxBase;
  Out.EmitBundleLock(WillEmitCallInst);

  MCInst ANDInst;
  ANDInst.setOpcode(X86::AND32ri8);
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateReg(Reg32));
  ANDInst.addOperand(MCOperand::CreateImm(JmpMask));
  Out.EmitInstruction(ANDInst);

  if (Is64Bit && !FlagUseZeroBasedSandbox) {
    MCInst InstADD;
    InstADD.setOpcode(X86::ADD64rr);
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(Reg64));
    InstADD.addOperand(MCOperand::CreateReg(X86::R15));
    Out.EmitInstruction(InstADD);
  }

  if (WillEmitCallInst) {
    // callq *%rXX
    MCInst CALLInst;
    CALLInst.setOpcode(Is64Bit ? X86::CALL64r : X86::CALL32r);
    CALLInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(CALLInst);
  } else {
    // jmpq *%rXX   -or-   jmpq *%r11
    MCInst JMPInst;
    JMPInst.setOpcode(Is64Bit ? X86::JMP64r : X86::JMP32r);
    JMPInst.addOperand(MCOperand::CreateReg(Is64Bit ? Reg64 : Reg32));
    Out.EmitInstruction(JMPInst);
  }
  Out.EmitBundleUnlock();
  if (RetTarget) {
    Out.EmitCodeAlignment(kNaClX86InstructionBundleSize);
    Out.EmitLabel(RetTarget);
  }
}