static void try_shift_conv( out_ctx *octx, enum op_type *binop, const out_val **lhs, const out_val **rhs) { if(type_is_signed((*lhs)->t)) return; if(*binop == op_divide && (*rhs)->type == V_CONST_I){ integral_t k = (*rhs)->bits.val_i; if((k & (k - 1)) == 0){ /* power of two, can shift */ out_val *mut; *binop = op_shiftr; *rhs = mut = v_dup_or_reuse(octx, *rhs, (*rhs)->t); mut->bits.val_i = log2(k); } }else if(*binop == op_multiply){ const out_val **vconst = (*lhs)->type == V_CONST_I ? lhs : rhs; integral_t k = (*vconst)->bits.val_i; if((k & (k - 1)) == 0){ out_val *mut; *binop = op_shiftl; *vconst = mut = v_dup_or_reuse(octx, *vconst, (*vconst)->t); mut->bits.val_i = log2(k); if(vconst == lhs){ /* need to swap as shift expects the constant to be rhs */ const out_val *tmp = *lhs; *lhs = *rhs; *rhs = tmp; } } } }
/*
 * Apply a unary operator to a value, folding at the value level
 * where possible:
 *   - unary plus is the identity,
 *   - !flag is folded by inverting the stored comparison,
 *   - !, - and ~ on an integer constant are folded directly
 *     (when const folding is enabled).
 * Anything not folded is handed to the backend via impl_op_unary().
 */
const out_val *out_op_unary(out_ctx *octx, enum op_type uop, const out_val *val)
{
	/* unary plus: no code, no value change */
	if(uop == op_plus)
		return val;

	switch(val->type){
		case V_FLAG:
			/* special case - reverse the flag if possible */
			if(uop == op_not){
				out_val *inverted = v_dup_or_reuse(octx, val, val->t);
				inverted->bits.flag.cmp = v_not_cmp(inverted->bits.flag.cmp);
				return inverted;
			}
			break;

		case V_CONST_I:
			if(cc1_fopt.const_fold){
				out_val *folded;

				switch(uop){
					case op_not:
						folded = v_dup_or_reuse(octx, val, val->t);
						folded->bits.val_i = !folded->bits.val_i;
						return folded;

					case op_minus:
						folded = v_dup_or_reuse(octx, val, val->t);
						folded->bits.val_i = -folded->bits.val_i;
						return folded;

					case op_bnot:
						folded = v_dup_or_reuse(octx, val, val->t);
						folded->bits.val_i = ~folded->bits.val_i;
						return folded;

					default:
						assert(0 && "invalid unary op");
				}
			}
			break;

		default:
			break;
	}

	return impl_op_unary(octx, uop, val);
}
/*
 * Try to fold "pointer-ish value +/- integer constant" straight into
 * the value's stored offset, instead of emitting an add/sub.
 *
 * Applies only when:
 *   - the op is an addition, or a subtraction whose constant is the rhs
 *     (if it's a minus, we enforce an order),
 *   - the value isn't a label, or symbol arithmetic is enabled,
 *   - the pointee step size is known at compile time.
 *
 * On success, consumes vconst and returns the adjusted value;
 * returns NULL (operands untouched) otherwise.
 */
static out_val *try_mem_offset(
		out_ctx *octx, enum op_type binop,
		const out_val *vconst, const out_val *vregp_or_lbl,
		const out_val *rhs)
{
	out_val *adjusted;
	long *poff;
	int step;

	if(binop != op_plus && !(binop == op_minus && vconst == rhs))
		return NULL;

	if(vregp_or_lbl->type == V_LBL && !(fopt_mode & FOPT_SYMBOL_ARITH))
		return NULL;

	step = calc_ptr_step(vregp_or_lbl->t);
	if(step == -1)
		return NULL;

	adjusted = v_dup_or_reuse(octx, vregp_or_lbl, vregp_or_lbl->t);

	switch(adjusted->type){
		case V_LBL:
			poff = &adjusted->bits.lbl.offset;
			break;
		case V_REG:
			poff = &adjusted->bits.regoff.offset;
			break;
		default:
			assert(0);
	}

	*poff += (binop == op_minus ? -1 : 1) * vconst->bits.val_i * step;

	out_val_consume(octx, vconst);
	return adjusted;
}
/*
 * Scale the operands of pointer arithmetic by the pointee size.
 *
 * For ptr +/- int: the integer operand (*lhs or *rhs, whichever is not
 * the pointer) is multiplied by the pointee step, either by patching a
 * constant in place or by emitting a multiply.
 * For ptr - ptr: *div_out is set to the divisor the caller must divide
 * the raw difference by afterwards; operands are left unchanged.
 * If neither operand is a pointer, nothing is done.
 *
 * NOTE(review): calc_ptr_step() returning -1 is treated as "size only
 * known at runtime" (VLA pointee), handled via vla_size() — confirm
 * against calc_ptr_step's definition.
 */
static void apply_ptr_step(
		out_ctx *octx,
		const out_val **lhs, const out_val **rhs,
		const out_val **div_out)
{
	int l_ptr = !!type_is((*lhs)->t, type_ptr);
	int r_ptr = !!type_is((*rhs)->t, type_ptr);
	int ptr_step;

	if(!l_ptr && !r_ptr)
		return; /* plain integer arithmetic - nothing to scale */

	ptr_step = calc_ptr_step((l_ptr ? *lhs : *rhs)->t);

	if(l_ptr ^ r_ptr){
		/* ptr +/- int, adjust the non-ptr by sizeof *ptr */
		const out_val **incdec = (l_ptr ? rhs : lhs);
		out_val *mut_incdec;

		/* mut_incdec aliases *incdec as a mutable pointer */
		*incdec = mut_incdec = v_dup_or_reuse(octx, *incdec, (*incdec)->t);

		switch(mut_incdec->type){
			case V_CONST_I:
				if(ptr_step == -1){
					/* runtime-sized pointee: emit a multiply by the VLA size */
					*incdec = out_op(octx, op_multiply, *incdec,
							vla_size(
								type_next((l_ptr ? *lhs : *rhs)->t),
								octx));
					mut_incdec = NULL; /* safety */
				}else{
					/* constant operand: scale it in place */
					mut_incdec->bits.val_i *= ptr_step;
				}
				break;

			case V_CONST_F:
				assert(0 && "float pointer inc?");
				/* fallthrough (assert is fatal; NDEBUG falls into reg path) */

			case V_LBL:
			case V_FLAG:
			case V_REG_SPILT:
				/* non-register value: force into a register first */
				assert(mut_incdec->retains == 1);
				*incdec = (out_val *)v_to_reg(octx, *incdec);
				/* fallthrough */

			case V_REG:
			{
				/* emit *incdec * step (constant or VLA size) */
				const out_val *n;
				if(ptr_step == -1){
					n = vla_size(
							type_next((l_ptr ? *lhs : *rhs)->t),
							octx);
				}else{
					n = out_new_l(
							octx,
							type_nav_btype(cc1_type_nav, type_intptr_t),
							ptr_step);
				}
				*incdec = (out_val *)out_op(octx, op_multiply, *incdec, n);
				break;
			}
		}
	}else if(l_ptr && r_ptr){
		/* difference - divide afterwards */
		if(ptr_step == -1){
			*div_out = vla_size(type_next((*lhs)->t), octx);
		}else{
			*div_out = out_new_l(octx,
					type_ptr_to(type_nav_btype(cc1_type_nav, type_void)),
					ptr_step);
		}
	}
}