static void
store_aux(IRSB *irsb, IREndness endian, IRExpr *addr, IRExpr *data)
{
   if (typeOfIRExpr(irsb->tyenv, data) == Ity_D64) {
      /* The insn selectors do not support writing a DFP value to memory.
         So we need to fix it here by reinterpreting the DFP value as an
         integer and storing that. */
      data = unop(Iop_ReinterpD64asI64, data);
   }
   if (typeOfIRExpr(irsb->tyenv, data) == Ity_I1) {
      /* We cannot store a single bit. So we store it in a 32-bit container.
         See also load_aux. */
      data = unop(Iop_1Uto32, data);
   }
   stmt(irsb, IRStmt_Store(endian, addr, data));
}
static IRExpr *
load_aux(IREndness endian, IRType type, IRExpr *addr)
{
   if (type == Ity_D64) {
      /* The insn selectors do not support loading a DFP value from memory.
         So we need to fix it here by loading an integer value and
         reinterpreting it as DFP. */
      return unop(Iop_ReinterpI64asD64, IRExpr_Load(endian, Ity_I64, addr));
   }
   if (type == Ity_I1) {
      /* A Boolean value is stored as a 32-bit entity (see store_aux). */
      return unop(Iop_32to1, IRExpr_Load(endian, Ity_I32, addr));
   }
   return IRExpr_Load(endian, type, addr);
}
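/* Not part of the original source: a minimal plain-C analogue of the two
   tricks used by store_aux/load_aux above, using a binary double since C
   has no portable DFP type. A 64-bit value round-trips through memory via
   its integer bit pattern (what Iop_ReinterpD64asI64/Iop_ReinterpI64asD64
   do at the IR level), and a single bit travels in a 32-bit container
   (Iop_1Uto32/Iop_32to1). */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void reinterp_demo(void)
{
   double d = 3.5, d2;
   uint64_t bits;

   /* Store side: reinterpret the value as an integer (no conversion,
      just the raw bit pattern). */
   memcpy(&bits, &d, sizeof bits);
   /* Load side: reinterpret the integer back; the value is unchanged. */
   memcpy(&d2, &bits, sizeof d2);
   assert(d2 == d);

   /* A 1-bit flag widened into a 32-bit container and narrowed again. */
   unsigned bit = 1;
   uint32_t container = bit;        /* 1Uto32 */
   assert((container & 1u) == bit); /* 32to1  */
}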
/* Store a value to memory. If a value requires more than 8 bytes, a series
   of 8-byte stores will be generated. */
static __inline__ void
store(IRSB *irsb, IREndness endian, HWord haddr, IRExpr *data)
{
   IROp high, low;
   IRExpr *addr, *next_addr;

   if (VEX_HOST_WORDSIZE == 8) {
      addr      = mkU64(haddr);
      next_addr = binop(Iop_Add64, addr, mkU64(8));
   } else if (VEX_HOST_WORDSIZE == 4) {
      addr      = mkU32(haddr);
      next_addr = binop(Iop_Add32, addr, mkU32(8));
   } else {
      vpanic("invalid #bytes for address");
   }

   IRType type = typeOfIRExpr(irsb->tyenv, data);

   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);

   switch (type) {
   case Ity_I128: high = Iop_128HIto64;   low = Iop_128to64;     goto store128;
   case Ity_F128: high = Iop_F128HItoF64; low = Iop_F128LOtoF64; goto store128;
   case Ity_D128: high = Iop_D128HItoD64; low = Iop_D128LOtoD64; goto store128;

   store128:
      /* Two stores of 64 bits each. */
      if (endian == Iend_BE) {
         /* The more significant bits are at the lower address. */
         store_aux(irsb, endian, addr,      unop(high, data));
         store_aux(irsb, endian, next_addr, unop(low,  data));
      } else {
         /* The more significant bits are at the higher address. */
         store_aux(irsb, endian, addr,      unop(low,  data));
         store_aux(irsb, endian, next_addr, unop(high, data));
      }
      return;

   default:
      store_aux(irsb, endian, addr, data);
      return;
   }
}
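/* A hedged sketch of the load counterpart to store() above, for symmetry
   with the calls to load() in the injection code further down; this is a
   reconstruction, not the verbatim original. Values wider than 8 bytes are
   assembled from two 64-bit loads; which half type and concatenation IROp
   each 128-bit type needs is an assumption of this sketch. */
static __inline__ IRExpr *
load(IREndness endian, IRType type, HWord haddr)
{
   IROp concat;
   IRType half;
   IRExpr *addr, *next_addr;

   if (VEX_HOST_WORDSIZE == 8) {
      addr      = mkU64(haddr);
      next_addr = binop(Iop_Add64, addr, mkU64(8));
   } else if (VEX_HOST_WORDSIZE == 4) {
      addr      = mkU32(haddr);
      next_addr = binop(Iop_Add32, addr, mkU32(8));
   } else {
      vpanic("invalid #bytes for address");
   }

   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);

   switch (type) {
   case Ity_I128: concat = Iop_64HLto128;   half = Ity_I64; goto load128;
   case Ity_F128: concat = Iop_F64HLtoF128; half = Ity_F64; goto load128;
   case Ity_D128: concat = Iop_D64HLtoD128; half = Ity_D64; goto load128;

   load128:
      /* Two loads of 64 bits each; the high half lives at the lower
         address on big-endian hosts, mirroring store(). */
      if (endian == Iend_BE)
         return binop(concat, load_aux(endian, half, addr),
                              load_aux(endian, half, next_addr));
      else
         return binop(concat, load_aux(endian, half, next_addr),
                              load_aux(endian, half, addr));

   default:
      return load_aux(endian, type, addr);
   }
}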
/* Given a, possibly abbreviated, function name to run, look it up and
 * run it if found. It takes nargs arguments from the operand stack.
 */
void
intrfunc (char *fname, int nargs)
{
    int op_index, op;
    int i, n, subi[2];
    int trim_side = TRIM_LEFT|TRIM_RIGHT;
    char *trim = " \t";
    char sbuf[SZ_LINE+1];
    struct operand o;

    op_index = keyword (ifnames, fname);
    if (op_index == KWBAD)
        cl_error (E_UERR, "unknown function `%s'", fname);
    if (op_index == KWAMBIG)
        cl_error (E_UERR, "ambiguous function `%s'", fname);

    op = optbl[op_index];

    /* If we shifted the cases and op right by OP_BITS, this would compile
     * as a jump table; not worth it until the table gets larger.
     */
    switch (op & ~OP_MASK) {
    case UNOP:
        if (nargs != 1)
            cl_error (E_UERR, e_onearg, ifnames[op_index]);
        unop (op & OP_MASK);
        break;

    case BINOP:
        if (nargs != 2)
            cl_error (E_UERR, e_twoargs, ifnames[op_index]);
        binop (op & OP_MASK);
        break;

    case MULTOP:
        multop (op & OP_MASK, op_index, nargs);
        break;

    case VOCOP:
        vocop (op & OP_MASK, op_index, nargs);
        break;

    case SAMPOP:
        sampop (op & OP_MASK, op_index, nargs);
        break;

    default:
err:
        cl_error (E_IERR, e_badsw, op, "intrfunc()");
    }
}
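/* Not IRAF source: a minimal sketch of the operator encoding intrfunc()
   relies on. Each optbl[] entry packs an operator class into the bits
   above OP_MASK and the specific opcode into the bits below it, so the
   dispatch can split one int into (class, opcode). The widths and values
   here are illustrative assumptions, not IRAF's actual definitions. */
#define OP_BITS  8
#define OP_MASK  ((1 << OP_BITS) - 1)   /* low bits: opcode      */
#define UNOP     (1 << OP_BITS)         /* high bits: class tags */
#define BINOP    (2 << OP_BITS)

enum { OP_ADD = 1, OP_MINUS = 2 };      /* illustrative opcodes */

static void encoding_demo(void)
{
    int entry  = BINOP | OP_ADD;        /* what an optbl[] entry holds  */
    int klass  = entry & ~OP_MASK;      /* -> BINOP, selects the case   */
    int opcode = entry & OP_MASK;       /* -> OP_ADD, passed to binop() */
    (void) klass; (void) opcode;
}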
/* <value to be subtracted from named parameter> . */
void
o_subassign (memel *argp)
{
    /* Operands are backwards on the stack, so negate and add. We can get
     * by with this as long as subtraction is never defined for strings;
     * if it is someday, we will have to do something like in addassign.
     */
    char *pname = (char *) argp;
    char *pk, *t, *p, *f;
    struct param *pp;

    breakout (pname, &pk, &t, &p, &f);
    pp = paramsrch (pk, t, p);

    unop (OP_MINUS);
    validparamget (pp, *f);
    binop (OP_ADD);
    paramset (pp, *f);
    pp->p_flags |= P_SET;
}
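/* A toy stack-machine rendition (not IRAF code) of the negate-and-add
   trick used by o_subassign() above: with the subtrahend already on top
   of the stack, "param -= value" is evaluated as param + (-value), which
   sidesteps the reversed operand order because addition is commutative
   and subtraction is not. */
#include <assert.h>

static double stk[16];
static int    sp = 0;

static void   push(double v)       { stk[sp++] = v; }
static double pop(void)            { return stk[--sp]; }
static void   toy_unop_minus(void) { stk[sp - 1] = -stk[sp - 1]; }
static void   toy_binop_add(void)  { double b = pop(); stk[sp - 1] += b; }

static void subassign_demo(void)
{
    double param = 10.0;

    push(3.0);             /* the value to subtract is pushed first     */
    toy_unop_minus();      /* unop(OP_MINUS): negate top of stack       */
    push(param);           /* validparamget(): push current param value */
    toy_binop_add();       /* binop(OP_ADD): param + (-value)           */
    assert(pop() == 7.0);  /* paramset(): store 10 - 3                  */
}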
static PyObject *
_unop(PyObject* self, PyObject* args)
{
    Imaging out;
    Imaging im1;
    void (*unop)(Imaging, Imaging);

    Py_ssize_t op, i0, i1;
    if (!PyArg_ParseTuple(args, "nnn", &op, &i0, &i1))
        return NULL;

    /* The operation and both images arrive as raw addresses packed into
       Python integers; cast them back to their C types. */
    out = (Imaging) i0;
    im1 = (Imaging) i1;
    unop = (void*) op;

    unop(out, im1);

    Py_INCREF(Py_None);
    return Py_None;
}
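/* The handler above smuggles a C function pointer through Python as a
   plain integer. A minimal sketch of that pattern, assuming the Imaging
   typedef from the module's headers is in scope; the names here are
   illustrative, not PIL's actual API. Round-tripping a function pointer
   through an integer is implementation-defined C, but it is the
   convention this module relies on. */
typedef void (*ImagingUnop)(Imaging out, Imaging in);

static void
invert_demo_op(Imaging out, Imaging in)
{
    /* ...per-pixel work would go here... */
    (void) out; (void) in;
}

static void
handle_demo(void)
{
    /* Exported to Python as an integer... */
    Py_ssize_t handle = (Py_ssize_t) (void*) invert_demo_op;

    /* ...and recovered on the way back in, as _unop() does. */
    ImagingUnop fn = (ImagingUnop) (void*) handle;
    (void) fn;
}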
/* <op> . <- op> */
void
o_chsign (void)
{
    unop (OP_MINUS);
}
static void
expr(Node *n)
{
	switch(n->t) {
	case NCOMMA:
		comma(n);
		break;
	case NCAST:
		cast(n);
		break;
	case NSTR:
		str(n);
		break;
	case NSIZEOF:
		outi("movq $%lld, %%rax\n", n->Sizeof.type->size);
		break;
	case NNUM:
		outi("movq $%lld, %%rax\n", n->Num.v);
		break;
	case NIDENT:
		ident(n);
		break;
	case NUNOP:
		unop(n);
		break;
	case NASSIGN:
		assign(n);
		break;
	case NBINOP:
		binop(n);
		break;
	case NIDX:
		idx(n);
		break;
	case NSEL:
		sel(n);
		break;
	case NCOND:
		cond(n);
		break;
	case NCALL:
		call(n);
		break;
	case NPTRADD:
		ptradd(n);
		break;
	case NINCDEC:
		incdec(n);
		break;
	case NBUILTIN:
		switch(n->Builtin.t) {
		case BUILTIN_VASTART:
			vastart(n);
			break;
		default:
			errorposf(&n->pos, "unimplemented builtin");
		}
		break;
	default:
		errorf("unimplemented emit expr %d\n", n->t);
	}
}
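/* A hedged sketch of the unop() emitter dispatched above, written in the
   same accumulator style as the NNUM/NSIZEOF cases (one value lives in
   %rax). The Unop field and operator encodings are assumptions about this
   compiler's AST, not taken from its source. */
static void
unop(Node *n)
{
	expr(n->Unop.operand);            /* operand value -> %rax */
	switch(n->Unop.op) {
	case '-':
		outi("negq %%rax\n");     /* arithmetic negation   */
		break;
	case '~':
		outi("notq %%rax\n");     /* bitwise complement    */
		break;
	case '!':
		outi("cmpq $0, %%rax\n"); /* logical not:          */
		outi("movq $0, %%rax\n"); /* %rax = (%rax == 0)    */
		outi("sete %%al\n");
		break;
	default:
		errorf("unimplemented unop %d\n", n->Unop.op);
	}
}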
/* Inject IR stmts depending on the data provided in the control
   block iricb. */
void
vex_inject_ir(IRSB *irsb, IREndness endian)
{
   IRExpr *data, *rounding_mode, *opnd1, *opnd2, *opnd3, *opnd4;

   rounding_mode = NULL;
   if (iricb.rounding_mode != NO_ROUNDING_MODE) {
      rounding_mode = mkU32(iricb.rounding_mode);
   }

   switch (iricb.num_operands) {
   case 1:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      if (rounding_mode)
         data = binop(iricb.op, rounding_mode, opnd1);
      else
         data = unop(iricb.op, opnd1);
      break;

   case 2:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      /* HACK: silences the compiler warning that 'opnd2' may be used
         uninitialized. */
      opnd2 = opnd1;

      /* immediate_index = 0: no immediate value is used.
         immediate_index = 2: opnd2 is an immediate value. */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 2);

      if (iricb.immediate_index == 2) {
         vassert((iricb.t_opnd2 == Ity_I8) || (iricb.t_opnd2 == Ity_I16)
                 || (iricb.t_opnd2 == Ity_I32));

         /* Interpret the memory as an ULong. */
         if (iricb.immediate_type == Ity_I8) {
            opnd2 = mkU8(*((ULong *)iricb.opnd2));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd2 = mkU16(*((ULong *)iricb.opnd2));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd2 = mkU32(*((ULong *)iricb.opnd2));
         }
      } else {
         opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      }

      if (rounding_mode)
         data = triop(iricb.op, rounding_mode, opnd1, opnd2);
      else
         data = binop(iricb.op, opnd1, opnd2);
      break;

   case 3:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      /* HACK: silences the compiler warning that 'opnd3' may be used
         uninitialized. */
      opnd3 = opnd2;

      /* immediate_index = 0: no immediate value is used.
         immediate_index = 3: opnd3 is an immediate value. */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 3);

      if (iricb.immediate_index == 3) {
         vassert((iricb.t_opnd3 == Ity_I8) || (iricb.t_opnd3 == Ity_I16)
                 || (iricb.t_opnd3 == Ity_I32));

         if (iricb.immediate_type == Ity_I8) {
            opnd3 = mkU8(*((ULong *)iricb.opnd3));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd3 = mkU16(*((ULong *)iricb.opnd3));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd3 = mkU32(*((ULong *)iricb.opnd3));
         }
      } else {
         opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      }

      if (rounding_mode)
         data = qop(iricb.op, rounding_mode, opnd1, opnd2, opnd3);
      else
         data = triop(iricb.op, opnd1, opnd2, opnd3);
      break;

   case 4:
      vassert(rounding_mode == NULL);
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      /* HACK: silences the compiler warning that 'opnd4' may be used
         uninitialized. */
      opnd4 = opnd3;

      /* immediate_index = 0: no immediate value is used.
         immediate_index = 4: opnd4 is an immediate value. */
      vassert(iricb.immediate_index == 0 || iricb.immediate_index == 4);

      if (iricb.immediate_index == 4) {
         vassert((iricb.t_opnd4 == Ity_I8) || (iricb.t_opnd4 == Ity_I16)
                 || (iricb.t_opnd4 == Ity_I32));

         if (iricb.immediate_type == Ity_I8) {
            opnd4 = mkU8(*((ULong *)iricb.opnd4));
         } else if (iricb.immediate_type == Ity_I16) {
            opnd4 = mkU16(*((ULong *)iricb.opnd4));
         } else if (iricb.immediate_type == Ity_I32) {
            opnd4 = mkU32(*((ULong *)iricb.opnd4));
         }
      } else {
         opnd4 = load(endian, iricb.t_opnd4, iricb.opnd4);
      }

      data = qop(iricb.op, opnd1, opnd2, opnd3, opnd4);
      break;

   default:
      vpanic("unsupported operator");
   }

   store(irsb, endian, iricb.result, data);

   if (0) {
      vex_printf("BEGIN inject\n");
      if (iricb.t_result == Ity_I1 || sizeofIRType(iricb.t_result) <= 8) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      } else if (sizeofIRType(iricb.t_result) == 16) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 2]);
         vex_printf("\n");
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      }
      vex_printf("\nEND inject\n");
   }
}
/* This version of flushEvents avoids callbacks entirely, except when the
   number of outstanding events is large enough to require a flush, in
   which case a call to flush_data() is made. In all other cases, events
   are handled by creating IR to encode and store the memory access
   information to the array of outstanding events. */
static void flushEventsRange(IRSB* sb, Int start, Int size)
{
    // Conditionally call the flush method if there's not enough room for
    // all the new events. This may flush an incomplete block.
    IRExpr *entries_addr = mkU64((ULong)&theEntries);
    IRExpr *entries = load(ENDIAN, Ity_I32, entries_addr);

    IRExpr *max_entries_addr = mkU64((ULong)&theMaxEntries);
    IRExpr *max_entries = load(ENDIAN, Ity_I32, max_entries_addr);

    IRDirty* di =
        unsafeIRDirty_0_N(0,
                          "flush_data", VG_(fnptr_to_fnentry)( flush_data ),
                          mkIRExprVec_0() );

    di->guard = binop(Iop_CmpLT32S, max_entries,
                      binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, IRStmt_Dirty(di) );

    // Reload entries since it might have been changed by the callback
    entries = load(ENDIAN, Ity_I32, entries_addr);

    // Initialize the first address where we'll write trace information.
    // This will be advanced in the loop.
    IRExpr *addr =
        binop(Iop_Add64,
              load(ENDIAN, Ity_I64, mkU64((ULong)&theBlock)),
              unop(Iop_32Uto64,
                   binop(Iop_Mul32, entries, mkU32(sizeof(MV_TraceAddr)))));

    // Grab the thread id
    IRExpr *thread = load(ENDIAN, Ity_I32, mkU64((ULong)&theThread));

    Int i;
    for (i = start; i < start+size; i++) {
        Event* ev = &events[i];

        uint32 type = 0;
        switch (ev->ekind) {
            case Event_Ir:
                type = MV_ShiftedInstr;
                break;
            case Event_Dr:
                type = MV_ShiftedRead;
                break;
            case Event_Dw:
            case Event_Dm:
                type = MV_ShiftedWrite;
                break;
            default:
                tl_assert(0);
        }

        type |= ev->type << MV_DataShift;
        type |= ((uint32)ev->size << MV_SizeShift);

        // Construct the data word and store the access address
        IRExpr *data = binop(Iop_Or32, mkU32(type), thread);

        IRStmt *store;

        store = IRStmt_Store(ENDIAN, addr, ev->addr);
        addStmtToIRSB( sb, store );

        // Advance to the type
        addr = binop(Iop_Add64, addr, mkU64(sizeof(uint64)));

        store = IRStmt_Store(ENDIAN, addr, data);
        addStmtToIRSB( sb, store );

        // Advance to the next entry
        addr = binop(Iop_Add64, addr,
                     mkU64(sizeof(MV_TraceAddr)-sizeof(uint64)));
    }

    // Store the new entry count
    IRStmt *entries_store =
        IRStmt_Store(ENDIAN, entries_addr,
                     binop(Iop_Add32, entries, mkU32(size)));

    addStmtToIRSB( sb, entries_store );
}
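/* The store offsets above (a 64-bit address first, a 32-bit word at offset
   sizeof(uint64), then a skip to the next sizeof(MV_TraceAddr) boundary)
   imply a trace-record layout along these lines. This is a reconstruction
   for illustration, not the tool's actual definition. */
typedef struct {
    uint64 addr;   /* access address, stored first                        */
    uint32 info;   /* type | (ev->type << MV_DataShift) | size | thread   */
} MV_TraceAddr_sketch;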
expression_ptr cpp_from_isl::process_op(isl_ast_expr * ast_op)
{
    int arg_count = isl_ast_expr_get_op_n_arg(ast_op);

    vector<expression_ptr> args;
    args.reserve(arg_count);

    for(int i = 0; i < arg_count; ++i)
    {
        auto ast_arg = isl_ast_expr_get_op_arg(ast_op, i);
        auto arg = process_expr(ast_arg);
        isl_ast_expr_free(ast_arg);
        args.push_back(arg);
    }

    expression_ptr expr;

    auto type = isl_ast_expr_get_op_type(ast_op);

    switch(type)
    {
    case isl_ast_op_and:
        expr = binop(op::logic_and, args[0], args[1]); break;
    case isl_ast_op_or:
        expr = binop(op::logic_or, args[0], args[1]); break;
    case isl_ast_op_max:
        expr = make_shared<call_expression>("max", args[0], args[1]); break;
    case isl_ast_op_min:
        expr = make_shared<call_expression>("min", args[0], args[1]); break;
    case isl_ast_op_minus:
        expr = unop(op::u_minus, args[0]); break;
    case isl_ast_op_add:
        expr = binop(op::add, args[0], args[1]); break;
    case isl_ast_op_sub:
        expr = binop(op::sub, args[0], args[1]); break;
    case isl_ast_op_mul:
        expr = binop(op::mult, args[0], args[1]); break;
    case isl_ast_op_div:
        expr = binop(op::div, args[0], args[1]); break;
    case isl_ast_op_eq:
        expr = binop(op::equal, args[0], args[1]); break;
    case isl_ast_op_le:
        expr = binop(op::lesser_or_equal, args[0], args[1]); break;
    case isl_ast_op_lt:
        expr = binop(op::lesser, args[0], args[1]); break;
    case isl_ast_op_ge:
        expr = binop(op::greater_or_equal, args[0], args[1]); break;
    case isl_ast_op_gt:
        expr = binop(op::greater, args[0], args[1]); break;
    case isl_ast_op_call:
    {
        auto id = dynamic_pointer_cast<id_expression>(args[0]);
        if (!id)
            throw error("Function identifier expression is not an identifier.");

        vector<expression_ptr> func_args(++args.begin(), args.end());

        // For user statements handled by the callback, expr stays null.
        if (m_is_user_stmt && m_stmt_func)
            m_stmt_func(id->name, func_args, m_ctx);
        else
            expr = make_shared<call_expression>(id->name, func_args);

        break;
    }
    case isl_ast_op_zdiv_r:
        // "Equal to zero iff the remainder on integer division is zero."
        expr = binop(op::rem, args[0], args[1]); break;
    case isl_ast_op_pdiv_r:
        // Remainder of integer division, where the dividend is known to be
        // non-negative.
        expr = binop(op::rem, args[0], args[1]); break;
    case isl_ast_op_pdiv_q:
        // Result of integer division, where the dividend is known to be
        // non-negative.
        expr = binop(op::div, args[0], args[1]); break;
    case isl_ast_op_or_else:   // Not implemented.
    case isl_ast_op_and_then:  // Not implemented.
    case isl_ast_op_fdiv_q:    // Not implemented.
        // Result of integer division, rounded towards negative infinity.
    case isl_ast_op_cond:      // Not implemented.
    case isl_ast_op_select:    // Not implemented.
    case isl_ast_op_access:    // Not implemented.
    case isl_ast_op_member:    // Not implemented.
    default:
        throw error("Unsupported AST expression type.");
    }

    return expr;
}
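/* Why isl_ast_op_fdiv_q is left unimplemented above: C's "/" truncates
   toward zero, while fdiv_q rounds toward negative infinity, so the two
   disagree whenever the division is inexact and the operands' signs
   differ. A floor-division helper along these lines (illustrative, not
   part of the generator) would be needed to support it: */
#include <assert.h>

static int floor_div(int a, int b)
{
    int q = a / b;
    /* If the division was inexact and the signs differ, C truncation
       rounded toward zero, i.e. upward; step down by one. */
    if ((a % b != 0) && ((a < 0) != (b < 0)))
        --q;
    return q;
}

static void fdiv_demo(void)
{
    assert(-7 / 2 == -3);            /* truncation toward zero     */
    assert(floor_div(-7, 2) == -4);  /* rounding toward -infinity  */
}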
/* Inject IR stmts depending on the data provided in the control
   block iricb. */
void
vex_inject_ir(IRSB *irsb, IREndness endian)
{
   IRExpr *data, *rounding_mode, *opnd1, *opnd2, *opnd3, *opnd4;

   rounding_mode = NULL;
   if (iricb.rounding_mode != NO_ROUNDING_MODE) {
      rounding_mode = mkU32(iricb.rounding_mode);
   }

   switch (iricb.num_operands) {
   case 1:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      if (rounding_mode)
         data = binop(iricb.op, rounding_mode, opnd1);
      else
         data = unop(iricb.op, opnd1);
      break;

   case 2:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);

      if (iricb.shift_amount_is_immediate) {
         // This implies that the IROp is a shift op
         vassert(iricb.t_opnd2 == Ity_I8);
         opnd2 = mkU8(*((Char *)iricb.opnd2));
      } else {
         opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      }

      if (rounding_mode)
         data = triop(iricb.op, rounding_mode, opnd1, opnd2);
      else
         data = binop(iricb.op, opnd1, opnd2);
      break;

   case 3:
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      if (rounding_mode)
         data = qop(iricb.op, rounding_mode, opnd1, opnd2, opnd3);
      else
         data = triop(iricb.op, opnd1, opnd2, opnd3);
      break;

   case 4:
      vassert(rounding_mode == NULL);
      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
      opnd4 = load(endian, iricb.t_opnd4, iricb.opnd4);
      data = qop(iricb.op, opnd1, opnd2, opnd3, opnd4);
      break;

   default:
      vpanic("unsupported operator");
   }

   store(irsb, endian, iricb.result, data);

   if (0) {
      vex_printf("BEGIN inject\n");
      if (iricb.t_result == Ity_I1 || sizeofIRType(iricb.t_result) <= 8) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      } else if (sizeofIRType(iricb.t_result) == 16) {
         ppIRStmt(irsb->stmts[irsb->stmts_used - 2]);
         vex_printf("\n");
         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
      }
      vex_printf("\nEND inject\n");
   }
}