rtx
nds32_expand_store_multiple (int base_regno, int count,
                             rtx base_addr, rtx basemem,
                             bool update_base_reg_p,
                             rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
        {
          *update_base_reg = gen_reg_rtx (SImode);
          return gen_unaligned_store_update_base_w (*update_base_reg,
                                                    base_addr, reg);
        }
      else
        return gen_unaligned_store_w (gen_rtx_MEM (SImode, base_addr), reg);
    }

  /* Create the pattern that is presented in nds32-multiple.md.  */

  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset = count * 4;
      new_addr = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }

  for (par_index = 0; par_index < count; par_index++)
    {
      offset = par_index * 4;
      /* 4-byte for storing data to memory.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem = adjust_automodify_address_nv (basemem, SImode,
                                          new_addr, offset);
      reg = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, par_index + start_idx) = gen_rtx_SET (mem, reg);
    }

  return result;
}
static void
adjust_frame_related_expr (rtx last_sp_set, rtx insn,
                           HOST_WIDE_INT this_adjust)
{
  rtx note = find_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, NULL_RTX);
  rtx new_expr = NULL_RTX;

  if (note == NULL_RTX && RTX_FRAME_RELATED_P (insn))
    return;

  if (note
      && GET_CODE (XEXP (note, 0)) == SEQUENCE
      && XVECLEN (XEXP (note, 0), 0) >= 2)
    {
      rtx expr = XEXP (note, 0);
      rtx last = XVECEXP (expr, 0, XVECLEN (expr, 0) - 1);
      int i;

      if (GET_CODE (last) == SET
          && RTX_FRAME_RELATED_P (last) == RTX_FRAME_RELATED_P (insn)
          && SET_DEST (last) == stack_pointer_rtx
          && GET_CODE (SET_SRC (last)) == PLUS
          && XEXP (SET_SRC (last), 0) == stack_pointer_rtx
          && CONST_INT_P (XEXP (SET_SRC (last), 1)))
        {
          XEXP (SET_SRC (last), 1)
            = GEN_INT (INTVAL (XEXP (SET_SRC (last), 1)) + this_adjust);
          return;
        }

      new_expr = gen_rtx_SEQUENCE (VOIDmode,
                                   rtvec_alloc (XVECLEN (expr, 0) + 1));
      for (i = 0; i < XVECLEN (expr, 0); i++)
        XVECEXP (new_expr, 0, i) = XVECEXP (expr, 0, i);
    }
  else
    {
      new_expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      if (note)
        XVECEXP (new_expr, 0, 0) = XEXP (note, 0);
      else
        {
          rtx expr = copy_rtx (single_set_for_csa (last_sp_set));
          XEXP (SET_SRC (expr), 1)
            = GEN_INT (INTVAL (XEXP (SET_SRC (expr), 1)) - this_adjust);
          RTX_FRAME_RELATED_P (expr) = 1;
          XVECEXP (new_expr, 0, 0) = expr;
        }
    }

  XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1)
    = copy_rtx (single_set_for_csa (insn));
  RTX_FRAME_RELATED_P (XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1))
    = RTX_FRAME_RELATED_P (insn);

  if (note)
    XEXP (note, 0) = new_expr;
  else
    add_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, new_expr);
}
// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
static void kernexec_instrument_retaddr_or(rtx insn)
{
    rtx orq;
    rtvec argvec, constraintvec, labelvec;
    int line;

    // create asm volatile("orq %%r10,(%%rsp)":::)
    argvec = rtvec_alloc(0);
    constraintvec = rtvec_alloc(0);
    labelvec = rtvec_alloc(0);
    line = expand_location(RTL_LOCATION(insn)).line;
    orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0,
                               argvec, constraintvec, labelvec, line);
    MEM_VOLATILE_P(orq) = 1;
//  RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
    emit_insn_before(orq, insn);
}
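For context, a minimal sketch of how such an instrumentation helper is typically driven: an RTL pass walks the insn stream and calls it just before each return insn. This sketch is an assumption about the surrounding pass, not the actual PaX/KERNEXEC plugin code; only kernexec_instrument_retaddr_or comes from the snippet above.

// illustrative only: instrument every (return) insn in the current function
static unsigned int execute_kernexec_retaddr(void)
{
    rtx insn;

    for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
        rtx body;

        // only jump insns can be returns
        if (!JUMP_P(insn))
            continue;
        body = PATTERN(insn);
        if (GET_CODE(body) == PARALLEL)
            body = XVECEXP(body, 0, 0);
        if (!ANY_RETURN_P(body))
            continue;
        kernexec_instrument_retaddr_or(insn);
    }
    return 0;
}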
rtx
nds32_expand_store_multiple (int base_regno, int count,
                             rtx base_addr, rtx basemem)
{
  int par_index;
  int offset;
  rtx result;
  rtx new_addr, mem, reg;

  /* Create the pattern that is presented in nds32-multiple.md.  */

  result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  for (par_index = 0; par_index < count; par_index++)
    {
      offset = par_index * 4;
      /* 4-byte for storing data to memory.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem = adjust_automodify_address_nv (basemem, SImode,
                                          new_addr, offset);
      reg = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, par_index) = gen_rtx_SET (mem, reg);
    }

  return result;
}
rtvec
shallow_copy_rtvec (rtvec vec)
{
  rtvec newvec;
  int n;

  n = GET_NUM_ELEM (vec);
  newvec = rtvec_alloc (n);
  memcpy (&newvec->elem[0], &vec->elem[0], sizeof (rtx) * n);
  return newvec;
}
rtx
rtx_vector_builder::build ()
{
  finalize ();

  rtx x = find_cached_value ();
  if (x)
    return x;

  unsigned int nelts;
  if (!GET_MODE_NUNITS (m_mode).is_constant (&nelts))
    nelts = encoded_nelts ();
  rtvec v = rtvec_alloc (nelts);
  for (unsigned int i = 0; i < nelts; ++i)
    RTVEC_ELT (v, i) = elt (i);
  x = gen_rtx_raw_CONST_VECTOR (m_mode, v);
  CONST_VECTOR_NPATTERNS (x) = npatterns ();
  CONST_VECTOR_NELTS_PER_PATTERN (x) = nelts_per_pattern ();
  return x;
}
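A hedged usage sketch of the builder interface that build () sits behind: the caller describes the vector by its pattern encoding, pushes the encoded elements, and then asks for the CONST_VECTOR. The mode, values, and the helper name below are illustrative assumptions, not taken from the snippet above.

/* Illustrative only: build the V4SI constant {0, 1, 2, 3} using the
   stepped encoding (one pattern, three encoded elements; the step is
   inferred from the last two).  */
static rtx
build_v4si_iota (void)
{
  rtx_vector_builder builder (V4SImode, 1, 3);
  builder.quick_push (const0_rtx);    /* first element   */
  builder.quick_push (const1_rtx);    /* second element  */
  builder.quick_push (GEN_INT (2));   /* third element   */
  return builder.build ();            /* CONST_VECTOR for {0, 1, 2, 3} */
}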
rtx
copy_rtx (rtx orig)
{
  rtx copy;
  int i, j;
  RTX_CODE code;
  const char *format_ptr;

  code = GET_CODE (orig);

  switch (code)
    {
    case REG:
    case DEBUG_EXPR:
    case VALUE:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
    case RETURN:
    case SIMPLE_RETURN:
    case SCRATCH:
      /* SCRATCH must be shared because they represent distinct values.  */
      return orig;
    case CLOBBER:
      if (REG_P (XEXP (orig, 0))
          && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER)
        return orig;
      break;

    case CONST:
      if (shared_const_p (orig))
        return orig;
      break;

      /* A MEM with a constant address is not sharable.  The problem is that
         the constant address may need to be reloaded.  If the mem is shared,
         then reloading one copy of this mem will cause all copies to appear
         to have been reloaded.  */

    default:
      break;
    }

  /* Copy the various flags, fields, and other information.  We assume
     that all fields need copying, and then clear the fields that should
     not be copied.  That is the sensible default behavior, and forces
     us to explicitly document why we are *not* copying a flag.  */
  copy = shallow_copy_rtx (orig);

  /* We do not copy the USED flag, which is used as a mark bit during
     walks over the RTL.  */
  RTX_FLAG (copy, used) = 0;

  format_ptr = GET_RTX_FORMAT (GET_CODE (copy));

  for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
    switch (*format_ptr++)
      {
      case 'e':
        if (XEXP (orig, i) != NULL)
          XEXP (copy, i) = copy_rtx (XEXP (orig, i));
        break;

      case 'E':
      case 'V':
        if (XVEC (orig, i) != NULL)
          {
            XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
            for (j = 0; j < XVECLEN (copy, i); j++)
              XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
          }
        break;

      case 't':
      case 'w':
      case 'i':
      case 's':
      case 'S':
      case 'T':
      case 'u':
      case 'B':
      case '0':
        /* These are left unchanged.  */
        break;

      default:
        gcc_unreachable ();
      }
  return copy;
}
rtx
copy_rtx (rtx orig)
{
  rtx copy;
  int i, j;
  RTX_CODE code;
  const char *format_ptr;

  code = GET_CODE (orig);

  switch (code)
    {
    case REG:
    case DEBUG_EXPR:
    case VALUE:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
    case RETURN:
    case SIMPLE_RETURN:
    case SCRATCH:
      /* SCRATCH must be shared because they represent distinct values.  */
      return orig;
    case CLOBBER:
      /* Share clobbers of hard registers (like cc0), but do not share
         pseudo reg clobbers or clobbers of hard registers that originated
         as pseudos.  This is needed to allow safe register renaming.  */
      if (REG_P (XEXP (orig, 0))
          && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER
          && ORIGINAL_REGNO (XEXP (orig, 0)) == REGNO (XEXP (orig, 0)))
        return orig;
      break;

    case CLOBBER_HIGH:
      gcc_assert (REG_P (XEXP (orig, 0)));
      return orig;

    case CONST:
      if (shared_const_p (orig))
        return orig;
      break;

      /* A MEM with a constant address is not sharable.  The problem is that
         the constant address may need to be reloaded.  If the mem is shared,
         then reloading one copy of this mem will cause all copies to appear
         to have been reloaded.  */

    default:
      break;
    }

  /* Copy the various flags, fields, and other information.  We assume
     that all fields need copying, and then clear the fields that should
     not be copied.  That is the sensible default behavior, and forces
     us to explicitly document why we are *not* copying a flag.  */
  copy = shallow_copy_rtx (orig);

  format_ptr = GET_RTX_FORMAT (GET_CODE (copy));

  for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
    switch (*format_ptr++)
      {
      case 'e':
        if (XEXP (orig, i) != NULL)
          XEXP (copy, i) = copy_rtx (XEXP (orig, i));
        break;

      case 'E':
      case 'V':
        if (XVEC (orig, i) != NULL)
          {
            XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
            for (j = 0; j < XVECLEN (copy, i); j++)
              XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
          }
        break;

      case 't':
      case 'w':
      case 'i':
      case 'p':
      case 's':
      case 'S':
      case 'T':
      case 'u':
      case 'B':
      case '0':
        /* These are left unchanged.  */
        break;

      default:
        gcc_unreachable ();
      }
  return copy;
}
static void
process_rtx (rtx desc, int lineno)
{
  switch (GET_CODE (desc))
    {
    case DEFINE_INSN:
      queue_pattern (desc, &define_insn_tail, read_rtx_filename, lineno);
      break;

    case DEFINE_COND_EXEC:
      queue_pattern (desc, &define_cond_exec_tail, read_rtx_filename, lineno);
      break;

    case DEFINE_ATTR:
      queue_pattern (desc, &define_attr_tail, read_rtx_filename, lineno);
      break;

    case DEFINE_PREDICATE:
    case DEFINE_SPECIAL_PREDICATE:
    case DEFINE_CONSTRAINT:
    case DEFINE_REGISTER_CONSTRAINT:
    case DEFINE_MEMORY_CONSTRAINT:
    case DEFINE_ADDRESS_CONSTRAINT:
      queue_pattern (desc, &define_pred_tail, read_rtx_filename, lineno);
      break;

    case INCLUDE:
      process_include (desc, lineno);
      break;

    case DEFINE_INSN_AND_SPLIT:
      {
        const char *split_cond;
        rtx split;
        rtvec attr;
        int i;
        struct queue_elem *insn_elem;
        struct queue_elem *split_elem;

        /* Create a split with values from the insn_and_split.  */
        split = rtx_alloc (DEFINE_SPLIT);

        i = XVECLEN (desc, 1);
        XVEC (split, 0) = rtvec_alloc (i);
        while (--i >= 0)
          {
            XVECEXP (split, 0, i) = copy_rtx (XVECEXP (desc, 1, i));
            remove_constraints (XVECEXP (split, 0, i));
          }

        /* If the split condition starts with "&&", append it to the
           insn condition to create the new split condition.  */
        split_cond = XSTR (desc, 4);
        if (split_cond[0] == '&' && split_cond[1] == '&')
          {
            copy_rtx_ptr_loc (split_cond + 2, split_cond);
            split_cond = join_c_conditions (XSTR (desc, 2), split_cond + 2);
          }
        XSTR (split, 1) = split_cond;
        XVEC (split, 2) = XVEC (desc, 5);
        XSTR (split, 3) = XSTR (desc, 6);

        /* Fix up the DEFINE_INSN.  */
        attr = XVEC (desc, 7);
        PUT_CODE (desc, DEFINE_INSN);
        XVEC (desc, 4) = attr;

        /* Queue them.  */
        insn_elem = queue_pattern (desc, &define_insn_tail,
                                   read_rtx_filename, lineno);
        split_elem = queue_pattern (split, &other_tail,
                                    read_rtx_filename, lineno);
        insn_elem->split = split_elem;
        break;
      }

    default:
      queue_pattern (desc, &other_tail, read_rtx_filename, lineno);
      break;
    }
}
/* Functions to expand load_multiple and store_multiple.
   They are auxiliary extern functions that help create the rtx templates.
   Check the nds32-multiple.md file for the patterns.  */
rtx
nds32_expand_load_multiple (int base_regno, int count,
                            rtx base_addr, rtx basemem,
                            bool update_base_reg_p,
                            rtx *update_base_reg)
{
  int par_index;
  int offset;
  int start_idx;
  rtx result;
  rtx new_addr, mem, reg;

  /* Generate an unaligned load to prevent the load instruction from being
     pulled out of the parallel; otherwise it would become an lwi and lose
     the unaligned access.  */
  if (count == 1)
    {
      reg = gen_rtx_REG (SImode, base_regno);
      if (update_base_reg_p)
        {
          *update_base_reg = gen_reg_rtx (SImode);
          return gen_unaligned_load_update_base_w (*update_base_reg,
                                                   reg, base_addr);
        }
      else
        return gen_unaligned_load_w (reg, gen_rtx_MEM (SImode, base_addr));
    }

  /* Create the pattern that is presented in nds32-multiple.md.  */

  if (update_base_reg_p)
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
      start_idx = 1;
    }
  else
    {
      result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
      start_idx = 0;
    }

  if (update_base_reg_p)
    {
      offset = count * 4;
      new_addr = plus_constant (Pmode, base_addr, offset);
      *update_base_reg = gen_reg_rtx (SImode);

      XVECEXP (result, 0, 0) = gen_rtx_SET (*update_base_reg, new_addr);
    }

  for (par_index = 0; par_index < count; par_index++)
    {
      offset = par_index * 4;
      /* 4-byte for loading data to each register.  */
      new_addr = plus_constant (Pmode, base_addr, offset);
      mem = adjust_automodify_address_nv (basemem, SImode,
                                          new_addr, offset);
      reg = gen_rtx_REG (SImode, base_regno + par_index);

      XVECEXP (result, 0, (par_index + start_idx)) = gen_rtx_SET (reg, mem);
    }

  return result;
}
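For orientation, a hedged sketch of how a caller might consume these expanders when open-coding a word-aligned block copy: each expander returns a PARALLEL pattern that the caller emits as a single insn. The helper name emit_word_block_copy and the argument choices are illustrative assumptions, not the actual nds32 back-end code.

/* Illustrative only: copy COUNT words by emitting one multiple-load followed
   by one multiple-store, starting at hard register BASE_REGNO.  No base
   register update is requested here, hence the 'false, NULL' arguments.  */
static void
emit_word_block_copy (int base_regno, int count,
                      rtx src_addr, rtx src_mem,
                      rtx dst_addr, rtx dst_mem)
{
  emit_insn (nds32_expand_load_multiple (base_regno, count,
                                         src_addr, src_mem, false, NULL));
  emit_insn (nds32_expand_store_multiple (base_regno, count,
                                          dst_addr, dst_mem, false, NULL));
}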
static void
process_rtx (rtx desc, int lineno)
{
  switch (GET_CODE (desc))
    {
    case DEFINE_INSN:
      queue_pattern (desc, &define_insn_tail, read_rtx_filename, lineno);
      break;

    case DEFINE_COND_EXEC:
      queue_pattern (desc, &define_cond_exec_tail, read_rtx_filename, lineno);
      break;

    case DEFINE_ATTR:
      queue_pattern (desc, &define_attr_tail, read_rtx_filename, lineno);
      break;

    case INCLUDE:
      process_include (desc, lineno);
      break;

    case DEFINE_INSN_AND_SPLIT:
      {
        const char *split_cond;
        rtx split;
        rtvec attr;
        int i;

        /* Create a split with values from the insn_and_split.  */
        split = rtx_alloc (DEFINE_SPLIT);

        i = XVECLEN (desc, 1);
        XVEC (split, 0) = rtvec_alloc (i);
        while (--i >= 0)
          {
            XVECEXP (split, 0, i) = copy_rtx (XVECEXP (desc, 1, i));
            remove_constraints (XVECEXP (split, 0, i));
          }

        /* If the split condition starts with "&&", append it to the
           insn condition to create the new split condition.  */
        split_cond = XSTR (desc, 4);
        if (split_cond[0] == '&' && split_cond[1] == '&')
          split_cond = concat (XSTR (desc, 2), split_cond, NULL);
        XSTR (split, 1) = split_cond;
        XVEC (split, 2) = XVEC (desc, 5);
        XSTR (split, 3) = XSTR (desc, 6);

        /* Fix up the DEFINE_INSN.  */
        attr = XVEC (desc, 7);
        PUT_CODE (desc, DEFINE_INSN);
        XVEC (desc, 4) = attr;

        /* Queue them.  */
        queue_pattern (desc, &define_insn_tail, read_rtx_filename, lineno);
        queue_pattern (split, &other_tail, read_rtx_filename, lineno);
        break;
      }

    default:
      queue_pattern (desc, &other_tail, read_rtx_filename, lineno);
      break;
    }
}