static rtx
conforming_compare (rtx_insn *insn)
{
  rtx set, src, dest;

  set = single_set (insn);
  if (set == NULL)
    return NULL;

  src = SET_SRC (set);
  if (GET_CODE (src) != COMPARE)
    return NULL;

  dest = SET_DEST (set);
  if (!REG_P (dest) || REGNO (dest) != targetm.flags_regnum)
    return NULL;

  if (!REG_P (XEXP (src, 0)))
    return NULL;

  if (CONSTANT_P (XEXP (src, 1)) || REG_P (XEXP (src, 1)))
    return src;

  if (GET_CODE (XEXP (src, 1)) == UNSPEC)
    {
      for (int i = 0; i < XVECLEN (XEXP (src, 1), 0); i++)
        if (!REG_P (XVECEXP (XEXP (src, 1), 0, i)))
          return NULL;
      return src;
    }

  return NULL;
}
static int
crx_addr_reg_p (rtx addr_reg)
{
  rtx reg;

  if (REG_P (addr_reg))
    {
      reg = addr_reg;
    }
  else if ((GET_CODE (addr_reg) == SUBREG
            && REG_P (SUBREG_REG (addr_reg))
            && GET_MODE_SIZE (GET_MODE (SUBREG_REG (addr_reg)))
               <= UNITS_PER_WORD))
    {
      reg = SUBREG_REG (addr_reg);
    }
  else
    return FALSE;

  if (GET_MODE (addr_reg) != Pmode)
    {
      return FALSE;
    }

  return TRUE;
}
static bool
should_replace_address (rtx old_rtx, rtx new_rtx, machine_mode mode,
                        addr_space_t as, bool speed)
{
  int gain;

  if (rtx_equal_p (old_rtx, new_rtx)
      || !memory_address_addr_space_p (mode, new_rtx, as))
    return false;

  /* Copy propagation is always ok.  */
  if (REG_P (old_rtx) && REG_P (new_rtx))
    return true;

  /* Prefer the new address if it is less expensive.  */
  gain = (address_cost (old_rtx, mode, as, speed)
          - address_cost (new_rtx, mode, as, speed));

  /* If the addresses have equivalent cost, prefer the new address
     if it has the highest `set_src_cost'.  That has the potential of
     eliminating the most insns without additional costs, and it is
     the same that cse.c used to do.  */
  if (gain == 0)
    gain = (set_src_cost (new_rtx, VOIDmode, speed)
            - set_src_cost (old_rtx, VOIDmode, speed));

  return (gain > 0);
}
static bool
aarch_rev16_p_1 (rtx lhs, rtx rhs, enum machine_mode mode)
{
  if (GET_CODE (lhs) == AND
      && GET_CODE (XEXP (lhs, 0)) == ASHIFT
      && CONST_INT_P (XEXP (XEXP (lhs, 0), 1))
      && INTVAL (XEXP (XEXP (lhs, 0), 1)) == 8
      && REG_P (XEXP (XEXP (lhs, 0), 0))
      && CONST_INT_P (XEXP (lhs, 1))
      && GET_CODE (rhs) == AND
      && GET_CODE (XEXP (rhs, 0)) == LSHIFTRT
      && REG_P (XEXP (XEXP (rhs, 0), 0))
      && CONST_INT_P (XEXP (XEXP (rhs, 0), 1))
      && INTVAL (XEXP (XEXP (rhs, 0), 1)) == 8
      && CONST_INT_P (XEXP (rhs, 1))
      && REGNO (XEXP (XEXP (rhs, 0), 0)) == REGNO (XEXP (XEXP (lhs, 0), 0)))
    {
      rtx lhs_mask = XEXP (lhs, 1);
      rtx rhs_mask = XEXP (rhs, 1);

      return aarch_rev16_shright_mask_imm_p (rhs_mask, mode)
             && aarch_rev16_shleft_mask_imm_p (lhs_mask, mode);
    }

  return false;
}
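/* Illustrative sketch, not part of the original sources: the C-level idiom
   that aarch_rev16_p_1 matches, shown for a 32-bit value with the usual
   REV16 masks.  The function name and the concrete mask constants are
   assumptions for illustration; in the real check the masks are validated
   by aarch_rev16_shleft_mask_imm_p / aarch_rev16_shright_mask_imm_p.  */
static unsigned int
rev16_idiom_sketch (unsigned int x)
{
  /* Swap the bytes within each 16-bit halfword: the left shift supplies
     the high byte of each halfword, the right shift the low byte.  */
  return ((x << 8) & 0xFF00FF00u) | ((x >> 8) & 0x00FF00FFu);
}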
static bool
arithmetic_flags_clobber_p (rtx_insn *insn)
{
  rtx pat, x;

  if (!NONJUMP_INSN_P (insn))
    return false;
  pat = PATTERN (insn);
  if (extract_asm_operands (pat))
    return false;

  if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) == 2)
    {
      x = XVECEXP (pat, 0, 0);
      if (GET_CODE (x) != SET)
        return false;
      x = SET_DEST (x);
      if (!REG_P (x))
        return false;

      x = XVECEXP (pat, 0, 1);
      if (GET_CODE (x) == CLOBBER)
        {
          x = XEXP (x, 0);
          if (REG_P (x) && REGNO (x) == targetm.flags_regnum)
            return true;
        }
    }

  return false;
}
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
                        HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg));
    }
/* In case the function does not return a value, we get a clobber of a
   pseudo followed by a set to the hard return value.  */
static rtx
skip_unreturned_value (rtx orig_insn)
{
  rtx insn = next_nonnote_insn (orig_insn);

  /* Skip possible clobber of pseudo return register.  */
  if (insn && GET_CODE (insn) == INSN
      && GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && (REGNO (XEXP (PATTERN (insn), 0)) >= FIRST_PSEUDO_REGISTER))
    {
      rtx set_insn = next_nonnote_insn (insn);
      rtx set;
      if (!set_insn)
        return insn;
      set = single_set (set_insn);
      if (!set
          || SET_SRC (set) != XEXP (PATTERN (insn), 0)
          || SET_DEST (set) != current_function_return_rtx)
        return insn;
      return set_insn;
    }
  return orig_insn;
}
/* Create candidate for INSN with rematerialization operand NOP and
   REGNO.  Insert the candidate into the table and set up the
   corresponding INSN_TO_CAND element.  */
static void
create_cand (rtx_insn *insn, int nop, int regno)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  rtx reg = *id->operand_loc[nop];
  gcc_assert (REG_P (reg));
  int op_regno = REGNO (reg);
  gcc_assert (op_regno >= FIRST_PSEUDO_REGISTER);
  cand_t cand = XNEW (struct cand);
  cand->insn = insn;
  cand->nop = nop;
  cand->regno = regno;
  cand->reload_regno = op_regno == regno ? -1 : op_regno;
  gcc_assert (cand->regno >= 0);
  cand_t cand_in_table = insert_cand (cand);
  insn_to_cand[INSN_UID (insn)] = cand_in_table;
  if (cand != cand_in_table)
    free (cand);
  else
    {
      /* A new cand.  */
      cand->index = all_cands.length ();
      all_cands.safe_push (cand);
      cand->next_regno_cand = regno_cands[cand->regno];
      regno_cands[cand->regno] = cand;
    }
}
/* Check if all uses in DEF_INSN can be used in TARGET_INSN.  This
   would require full computation of available expressions;
   we check only restricted conditions, see use_killed_between.  */
static bool
all_uses_available_at (rtx_insn *def_insn, rtx_insn *target_insn)
{
  df_ref use;
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
  rtx def_set = single_set (def_insn);
  rtx_insn *next;

  gcc_assert (def_set);

  /* If target_insn comes right after def_insn, which is very common
     for addresses, we can use a quicker test.  Ignore debug insns
     other than target insns for this.  */
  next = NEXT_INSN (def_insn);
  while (next && next != target_insn && DEBUG_INSN_P (next))
    next = NEXT_INSN (next);

  if (next == target_insn && REG_P (SET_DEST (def_set)))
    {
      rtx def_reg = SET_DEST (def_set);

      /* If the insn uses the reg that it defines, the substitution is
         invalid.  */
      FOR_EACH_INSN_INFO_USE (use, insn_info)
        if (rtx_equal_p (DF_REF_REG (use), def_reg))
          return false;
      FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
        if (rtx_equal_p (DF_REF_REG (use), def_reg))
          return false;
    }
static inline int
ix86_carry_flag_operator_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 1045 "../.././gcc/config/i386/predicates.md"
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (!REG_P (XEXP (op, 0))
      || REGNO (XEXP (op, 0)) != FLAGS_REG
      || XEXP (op, 1) != const0_rtx)
    return 0;

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      if (bypass_code != UNKNOWN || second_code != UNKNOWN)
        return 0;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
    return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return 0;

  return code == LTU;
}
rtx
gen_lowpart_general (machine_mode mode, rtx x)
{
  rtx result = gen_lowpart_common (mode, x);

  if (result)
    return result;
  /* Handle SUBREGs and hard REGs that were rejected by
     simplify_gen_subreg.  */
  else if (REG_P (x) || GET_CODE (x) == SUBREG)
    {
      result = gen_lowpart_common (mode, copy_to_reg (x));
      gcc_assert (result != 0);
      return result;
    }
  else
    {
      /* The only additional case we can do is MEM.  */
      gcc_assert (MEM_P (x));

      /* The following exposes the use of "x" to CSE.  */
      scalar_int_mode xmode;
      if (is_a <scalar_int_mode> (GET_MODE (x), &xmode)
          && GET_MODE_SIZE (xmode) <= UNITS_PER_WORD
          && TRULY_NOOP_TRUNCATION_MODES_P (mode, xmode)
          && !reload_completed)
        return gen_lowpart_general (mode, force_reg (xmode, x));

      poly_int64 offset = byte_lowpart_offset (mode, GET_MODE (x));
      return adjust_address (x, mode, offset);
    }
}
/* Return true if INSN is an instruction that sets a target register.
   If CHECK_CONST is true, only return true if the source is constant.
   If such a set is found and REGNO is nonzero, assign the register
   number of the destination register to *REGNO.  */
static int
insn_sets_btr_p (const_rtx insn, int check_const, int *regno)
{
  rtx set;

  if (NONJUMP_INSN_P (insn) && (set = single_set (insn)))
    {
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);

      if (GET_CODE (dest) == SUBREG)
        dest = XEXP (dest, 0);

      if (REG_P (dest) && TEST_HARD_REG_BIT (all_btrs, REGNO (dest)))
        {
          gcc_assert (!btr_referenced_p (src, NULL));

          if (!check_const || CONSTANT_P (src))
            {
              if (regno)
                *regno = REGNO (dest);
              return 1;
            }
        }
    }
  return 0;
}
/* If INSN cannot be used for rematerialization, return a negative
   value.  If INSN can be considered as a candidate for
   rematerialization, return the operand number of the pseudo for
   which the insn can be used for rematerialization.  Here we consider
   the insns without any memory, spilled pseudo (except for the
   rematerialization pseudo), or dying or unused regs.  */
static int
operand_to_remat (rtx_insn *insn)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  struct lra_static_insn_data *static_id = id->insn_static_data;
  struct lra_insn_reg *reg, *found_reg = NULL;

  /* First find a pseudo which can be rematerialized.  */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    /* True FRAME_POINTER_NEEDED might be because we cannot follow
       changing sp offsets, e.g. alloca is used.  If the insn contains
       the stack pointer in such a case, we cannot rematerialize it as
       we cannot know the sp offset at a rematerialization place.  */
    if (reg->regno == STACK_POINTER_REGNUM && frame_pointer_needed)
      return -1;
    else if (reg->type == OP_OUT && ! reg->subreg_p
             && find_regno_note (insn, REG_UNUSED, reg->regno) == NULL)
      {
        /* We permit only one spilled reg.  */
        if (found_reg != NULL)
          return -1;
        found_reg = reg;
      }
  if (found_reg == NULL)
    return -1;
  if (found_reg->regno < FIRST_PSEUDO_REGISTER)
    return -1;
  if (bad_for_rematerialization_p (PATTERN (insn)))
    return -1;
  /* Check the other regs are not spilled.  */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    if (found_reg == reg)
      continue;
    else if (reg->type == OP_INOUT)
      return -1;
    else if (reg->regno >= FIRST_PSEUDO_REGISTER
             && reg_renumber[reg->regno] < 0)
      /* Another spilled reg.  */
      return -1;
    else if (reg->type == OP_IN)
      {
        if (find_regno_note (insn, REG_DEAD, reg->regno) != NULL)
          /* We don't want to make live ranges longer.  */
          return -1;
        /* Check that there is no output reg as the input one.  */
        for (struct lra_insn_reg *reg2 = id->regs;
             reg2 != NULL;
             reg2 = reg2->next)
          if (reg2->type == OP_OUT && reg->regno == reg2->regno)
            return -1;
      }
  /* Find the rematerialization operand.  */
  int nop = static_id->n_operands;
  for (int i = 0; i < nop; i++)
    if (REG_P (*id->operand_loc[i])
        && (int) REGNO (*id->operand_loc[i]) == found_reg->regno)
      return i;
  return -1;
}
bool
nds32_expand_strlen (rtx result, rtx str,
                     rtx target_char, rtx align ATTRIBUTE_UNUSED)
{
  rtx base_reg, backup_base_reg;
  rtx ffb_result;
  rtx target_char_ptr, length;
  rtx loop_label, tmp;

  if (optimize_size || optimize < 3)
    return false;

  gcc_assert (MEM_P (str));
  gcc_assert (CONST_INT_P (target_char) || REG_P (target_char));

  base_reg = copy_to_mode_reg (SImode, XEXP (str, 0));
  loop_label = gen_label_rtx ();

  ffb_result = gen_reg_rtx (Pmode);
  tmp = gen_reg_rtx (SImode);
  backup_base_reg = gen_reg_rtx (SImode);

  /* Emit loop version of strlen.
       move  $backup_base, $base
     .Lloop:
       lmw.bim $tmp, [$base], $tmp, 0
       ffb   $ffb_result, $tmp, $target_char   ! is there $target_char?
       beqz  $ffb_result, .Lloop
       add   $last_char_ptr, $base, $ffb_result
       sub   $length, $last_char_ptr, $backup_base  */

  /* move  $backup_base, $base  */
  emit_move_insn (backup_base_reg, base_reg);

  /* .Lloop:  */
  emit_label (loop_label);

  /* lmw.bim $tmp, [$base], $tmp, 0  */
  emit_insn (gen_unaligned_load_update_base_w (base_reg, tmp, base_reg));

  /* ffb   $ffb_result, $tmp, $target_char   ! is there $target_char?  */
  emit_insn (gen_unspec_ffb (ffb_result, tmp, target_char));

  /* beqz  $ffb_result, .Lloop  */
  emit_cmp_and_jump_insns (ffb_result, const0_rtx, EQ, NULL,
                           SImode, 1, loop_label);

  /* add   $target_char_ptr, $base, $ffb_result  */
  target_char_ptr = expand_binop (Pmode, add_optab, base_reg,
                                  ffb_result, NULL_RTX, 0, OPTAB_WIDEN);

  /* sub   $length, $target_char_ptr, $backup_base  */
  length = expand_binop (Pmode, sub_optab, target_char_ptr,
                         backup_base_reg, NULL_RTX, 0, OPTAB_WIDEN);

  emit_move_insn (result, length);

  return true;
}
static void
insert_value_copy_on_edge (edge e, int dest, tree src, source_location locus)
{
  rtx seq, x;
  enum machine_mode dest_mode, src_mode;
  int unsignedp;
  tree var;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Inserting a value copy on edge BB%d->BB%d : PART.%d = ",
               e->src->index, e->dest->index, dest);
      print_generic_expr (dump_file, src, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  gcc_assert (SA.partition_to_pseudo[dest]);

  set_location_for_edge (e);
  /* If a locus is provided, override the default.  */
  if (locus)
    set_curr_insn_source_location (locus);

  start_sequence ();

  var = SSA_NAME_VAR (partition_to_var (SA.map, dest));
  src_mode = TYPE_MODE (TREE_TYPE (src));
  dest_mode = GET_MODE (SA.partition_to_pseudo[dest]);
  gcc_assert (src_mode == TYPE_MODE (TREE_TYPE (var)));
  gcc_assert (!REG_P (SA.partition_to_pseudo[dest])
              || dest_mode == promote_decl_mode (var, &unsignedp));

  if (src_mode != dest_mode)
    {
      x = expand_expr (src, NULL, src_mode, EXPAND_NORMAL);
      x = convert_modes (dest_mode, src_mode, x, unsignedp);
    }
  else if (src_mode == BLKmode)
    {
      x = SA.partition_to_pseudo[dest];
      store_expr (src, x, 0, false);
    }
  else
    x = expand_expr (src, SA.partition_to_pseudo[dest],
                     dest_mode, EXPAND_NORMAL);

  if (x != SA.partition_to_pseudo[dest])
    emit_move_insn (SA.partition_to_pseudo[dest], x);
  seq = get_insns ();
  end_sequence ();

  insert_insn_on_edge (seq, e);
}
/* Generate code for transformations 3 and 4 (with MODE and OPERATION,
   operands OP1 and OP2, result TARGET, at most SUB subtractions, and
   probability of taking the optimal path(s) PROB1 and PROB2).  */
static rtx
gen_mod_subtract (enum machine_mode mode, enum rtx_code operation,
                  rtx target, rtx op1, rtx op2, int sub, int prob1, int prob2)
{
  rtx tmp, tmp1, jump;
  rtx end_label = gen_label_rtx ();
  rtx sequence;
  int i;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  emit_move_insn (target, copy_rtx (op1));
  do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
                           NULL_RTX, end_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob1), REG_NOTES (jump));

  for (i = 0; i < sub; i++)
    {
      tmp1 = expand_simple_binop (mode, MINUS, target, tmp, target,
                                  0, OPTAB_WIDEN);
      if (tmp1 != target)
        emit_move_insn (target, tmp1);
      do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
                               NULL_RTX, end_label);

      /* Add branch probability to jump we just created.  */
      jump = get_last_insn ();
      REG_NOTES (jump)
        = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob2), REG_NOTES (jump));
    }

  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (target),
                              copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
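/* Illustrative sketch, not part of the original sources: the source-level
   shape of the transformation gen_mod_subtract expands, assuming unsigned
   operands, MOD as the operation, and SUB == 2 cheap subtractions before
   falling back to a real modulus.  Name and constants are hypothetical.  */
static unsigned int
mod_subtract_sketch (unsigned int op1, unsigned int op2)
{
  unsigned int r = op1;

  if (r < op2)
    return r;                   /* Optimal path: no subtraction needed.  */
  for (int i = 0; i < 2; i++)
    {
      r -= op2;                 /* Cheap subtraction instead of a division.  */
      if (r < op2)
        return r;
    }
  return r % op2;               /* Fallback: the original operation.  */
}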
static rtx
propagate_rtx (rtx x, machine_mode mode, rtx old_rtx, rtx new_rtx,
               bool speed)
{
  rtx tem;
  bool collapsed;
  int flags;

  if (REG_P (new_rtx) && REGNO (new_rtx) < FIRST_PSEUDO_REGISTER)
    return NULL_RTX;

  flags = 0;
  if (REG_P (new_rtx)
      || CONSTANT_P (new_rtx)
      || (GET_CODE (new_rtx) == SUBREG
          && REG_P (SUBREG_REG (new_rtx))
          && (GET_MODE_SIZE (mode)
              <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (new_rtx))))))
    flags |= PR_CAN_APPEAR;
  if (!varying_mem_p (new_rtx))
    flags |= PR_HANDLE_MEM;

  if (speed)
    flags |= PR_OPTIMIZE_FOR_SPEED;

  tem = x;
  collapsed = propagate_rtx_1 (&tem, old_rtx, copy_rtx (new_rtx), flags);
  if (tem == x || !collapsed)
    return NULL_RTX;

  /* gen_lowpart_common will not be able to process VOIDmode entities
     other than CONST_INTs.  */
  if (GET_MODE (tem) == VOIDmode && !CONST_INT_P (tem))
    return NULL_RTX;

  if (GET_MODE (tem) == VOIDmode)
    tem = rtl_hooks.gen_lowpart_no_emit (mode, tem);
  else
    gcc_assert (GET_MODE (tem) == mode);

  return tem;
}
int
vax_mode_dependent_address_p (rtx x)
{
  rtx xfoo0, xfoo1;

  /* Auto-increment cases are now dealt with generically in recog.c.  */
  if (GET_CODE (x) != PLUS)
    return 0;

  xfoo0 = XEXP (x, 0);
  xfoo1 = XEXP (x, 1);

  if (CONSTANT_ADDRESS_P (xfoo0) && REG_P (xfoo1))
    return 0;
  if (CONSTANT_ADDRESS_P (xfoo1) && REG_P (xfoo0))
    return 0;

  return 1;
}
static void
kill_set_value (rtx x, const_rtx set, void *data)
{
  struct value_data *const vd = (struct value_data *) data;
  if (GET_CODE (set) != CLOBBER)
    {
      kill_value (x, vd);
      if (REG_P (x))
        set_value_regno (REGNO (x), GET_MODE (x), vd);
    }
}
static rtx_expr_list *
extract_mentioned_regs (rtx x)
{
  rtx_expr_list *mentioned_regs = NULL;
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx x = *iter;
      if (REG_P (x))
        mentioned_regs = alloc_EXPR_LIST (0, x, mentioned_regs);
    }
  return mentioned_regs;
}
int
vax_mode_dependent_address_p (rtx x)
{
  rtx xfoo0, xfoo1;

  if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
    return 1;

  if (GET_CODE (x) != PLUS)
    return 0;

  xfoo0 = XEXP (x, 0);
  xfoo1 = XEXP (x, 1);

  if (CONSTANT_ADDRESS_P (xfoo0) && REG_P (xfoo1))
    return 0;
  if (CONSTANT_ADDRESS_P (xfoo1) && REG_P (xfoo0))
    return 0;

  return 1;
}
static void
kill_value (const_rtx x, struct value_data *vd)
{
  if (GET_CODE (x) == SUBREG)
    {
      rtx tmp = simplify_subreg (GET_MODE (x), SUBREG_REG (x),
                                 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      x = tmp ? tmp : SUBREG_REG (x);
    }

  if (REG_P (x))
    kill_value_regno (REGNO (x), REG_NREGS (x), vd);
}
static void
sdbout_reg_parms (tree parms)
{
  for (; parms; parms = TREE_CHAIN (parms))
    if (DECL_NAME (parms))
      {
        const char *name = IDENTIFIER_POINTER (DECL_NAME (parms));

        /* Report parms that live in registers during the function
           but were passed in memory.  */
        if (REG_P (DECL_RTL (parms))
            && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER
            && PARM_PASSED_IN_MEMORY (parms))
          {
            if (name == 0 || *name == 0)
              name = gen_fake_label ();
            PUT_SDB_DEF (name);
            PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (DECL_RTL (parms))));
            PUT_SDB_SCL (C_REG);
            PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
            PUT_SDB_ENDEF;
          }
        /* Report parms that live in memory but not where they were
           passed.  */
        else if (MEM_P (DECL_RTL (parms))
                 && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS
                 && CONST_INT_P (XEXP (XEXP (DECL_RTL (parms), 0), 1))
                 && PARM_PASSED_IN_MEMORY (parms)
                 && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
          {
#if 0 /* ??? It is not clear yet what should replace this.  */
            int offset = DECL_OFFSET (parms) / BITS_PER_UNIT;
            /* A parm declared char is really passed as an int,
               so it occupies the least significant bytes.
               On a big-endian machine those are not the low-numbered ones.  */
            if (BYTES_BIG_ENDIAN
                && offset != -1
                && TREE_TYPE (parms) != DECL_ARG_TYPE (parms))
              offset += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms)))
                         - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))));
            if (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) != offset)
              {...}
#endif
              {
                if (name == 0 || *name == 0)
                  name = gen_fake_label ();
                PUT_SDB_DEF (name);
                PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET
                                 (XEXP (DECL_RTL (parms), 0)));
                PUT_SDB_SCL (C_AUTO);
                PUT_SDB_TYPE (plain_type (TREE_TYPE (parms)));
                PUT_SDB_ENDEF;
              }
          }
      }
static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}
static bool
moxie_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                            rtx x, bool strict_p,
                            addr_space_t as)
{
  gcc_assert (ADDR_SPACE_GENERIC_P (as));

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && moxie_reg_ok_for_base_p (XEXP (x, 0), strict_p)
      && CONST_INT_P (XEXP (x, 1))
      && IN_RANGE (INTVAL (XEXP (x, 1)), -32768, 32767))
    return true;
  if (REG_P (x) && moxie_reg_ok_for_base_p (x, strict_p))
    return true;
  if (GET_CODE (x) == SYMBOL_REF
      || GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == CONST)
    return true;
  return false;
}
/* Works on Always On power domain only (no PD argument) */
void lpsc_on(unsigned int id)
{
	dv_reg_p mdstat, mdctl;

	if (id >= DAVINCI_LPSC_GEM)
		return;			/* Don't work on DSP Power Domain */

	mdstat = REG_P(PSC_MDSTAT_BASE + (id * 4));
	mdctl = REG_P(PSC_MDCTL_BASE + (id * 4));

	while (REG(PSC_PTSTAT) & 0x01) {;}

	if ((*mdstat & 0x1f) == 0x03)
		return;			/* Already on and enabled */

	*mdctl |= 0x03;

	/* Special treatment for some modules as for sprue14 p.7.4.2 */
	if ((id == DAVINCI_LPSC_VPSSSLV) ||
	    (id == DAVINCI_LPSC_EMAC) ||
	    (id == DAVINCI_LPSC_EMAC_WRAPPER) ||
	    (id == DAVINCI_LPSC_MDIO) ||
	    (id == DAVINCI_LPSC_USB) ||
	    (id == DAVINCI_LPSC_ATA) ||
	    (id == DAVINCI_LPSC_VLYNQ) ||
	    (id == DAVINCI_LPSC_UHPI) ||
	    (id == DAVINCI_LPSC_DDR_EMIF) ||
	    (id == DAVINCI_LPSC_AEMIF) ||
	    (id == DAVINCI_LPSC_MMC_SD) ||
	    (id == DAVINCI_LPSC_MEMSTICK) ||
	    (id == DAVINCI_LPSC_McBSP) ||
	    (id == DAVINCI_LPSC_GPIO))
		*mdctl |= 0x200;

	REG(PSC_PTCMD) = 0x01;

	while (REG(PSC_PTSTAT) & 0x03) {;}
	while ((*mdstat & 0x1f) != 0x03) {;}	/* Probably an overkill... */
}
int c54x_expand_movqi(rtx ops[])
{
  int done = 0;
  int i;

  fprintf(stderr, "--->>>");
  for (i = 0; i < 2; i++)
    {
      print_rtl(stderr, ops[i]);
    }
  fprintf(stderr, "<<<---\n");

  if (ACC_REG_P(ops[0]))
    {
      ops[0] = copy_rtx(ops[0]);
      PUT_MODE(ops[0], PSImode);

      fprintf(stderr, "+++");
      print_rtl(stderr, ops[0]);
      fprintf(stderr, "+++\n");

      done = 1;

      if (MEM_P(ops[1]))
        {
          emit_insn(gen_ldm(ops[0], ops[1]));
        }
      else if (REG_P(ops[1]))
        {
          emit_insn(gen_ldu(ops[0], ops[1]));
        }
      else if (CONSTANT_P(ops[1]))
        {
          emit_insn(gen_ld_const(ops[0], ops[1], gen_reg_rtx(QImode)));
        }
      else
        {
          done = 2;
        }
    }
  else if ((REG_P(ops[0])
            && (GET_CODE(ops[1]) == MEM && REG_P(XEXP(ops[1], 0))))
           || (T_REG_P(ops[0]) && ARSP_REG_P(ops[1])))
    {
      done = 2;
    }

  return done;
}
/* Generate code for transformation 2 (with MODE and OPERATION,
   operands OP1 and OP2, result TARGET and probability of taking the
   optimal path PROB).  */
static rtx
gen_mod_pow2 (enum machine_mode mode, enum rtx_code operation, rtx target,
              rtx op1, rtx op2, int prob)
{
  rtx tmp, tmp1, tmp2, tmp3, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  tmp1 = expand_simple_binop (mode, PLUS, tmp, constm1_rtx, NULL_RTX,
                              0, OPTAB_WIDEN);
  tmp2 = expand_simple_binop (mode, AND, tmp, tmp1, NULL_RTX,
                              0, OPTAB_WIDEN);
  do_compare_rtx_and_jump (tmp2, const0_rtx, NE, 0, mode, NULL_RTX,
                           NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
                         GEN_INT (REG_BR_PROB_BASE - prob),
                         REG_NOTES (jump));

  tmp3 = expand_simple_binop (mode, AND, op1, tmp1, target,
                              0, OPTAB_WIDEN);
  if (tmp3 != target)
    emit_move_insn (copy_rtx (target), tmp3);
  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1),
                              copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
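/* Illustrative sketch, not part of the original sources: the source-level
   shape of transformation 2 that gen_mod_pow2 expands, assuming unsigned
   operands and MOD as the operation.  The name is hypothetical.  */
static unsigned int
mod_pow2_sketch (unsigned int op1, unsigned int op2)
{
  unsigned int mask = op2 - 1;

  if ((op2 & mask) == 0)        /* OP2 is a power of two: the likely path.  */
    return op1 & mask;          /* Replace the modulus by a cheap AND.  */
  return op1 % op2;             /* Fallback: the original operation.  */
}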
/* Generate code for transformation 1 (with MODE and OPERATION, operands
   OP1 and OP2, whose value is expected to be VALUE, result TARGET and
   probability of taking the optimal path PROB).  */
static rtx
gen_divmod_fixed_value (enum machine_mode mode, enum rtx_code operation,
                        rtx target, rtx op1, rtx op2, gcov_type value,
                        int prob)
{
  rtx tmp, tmp1, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  do_compare_rtx_and_jump (tmp, GEN_INT (value), NE, 0, mode, NULL_RTX,
                           NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
                         GEN_INT (REG_BR_PROB_BASE - prob),
                         REG_NOTES (jump));

  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1),
                              GEN_INT (value));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1),
                              copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
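/* Illustrative sketch, not part of the original sources: the source-level
   shape of transformation 1 that gen_divmod_fixed_value expands, assuming
   unsigned operands, DIV as the operation, and a profiled VALUE of 16.
   The name and the constant are hypothetical.  */
static unsigned int
divmod_fixed_value_sketch (unsigned int op1, unsigned int op2)
{
  if (op2 == 16)
    return op1 / 16;            /* Division by a known constant is cheap.  */
  return op1 / op2;             /* Fallback: generic division.  */
}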
static void
kill_set_value (rtx x, const_rtx set, void *data)
{
  struct kill_set_value_data *ksvd = (struct kill_set_value_data *) data;
  if (rtx_equal_p (x, ksvd->ignore_set_reg))
    return;
  if (GET_CODE (set) != CLOBBER)
    {
      kill_value (x, ksvd->vd);
      if (REG_P (x))
        set_value_regno (REGNO (x), GET_MODE (x), ksvd->vd);
    }
}