Example #1
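/* Return true if the target has an instruction for extracting a bit
   field of mode MODE, and describe it in *INSN if so.  TYPE selects
   between REG_OPTAB (register structures) and MISALIGN_OPTAB
   (misaligned memory structures); POS_OP is the operand number of
   the bit-position operand.  */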
static bool
get_optab_extraction_insn (struct extraction_insn *insn,
			   enum extraction_type type,
			   machine_mode mode, direct_optab reg_optab,
			   direct_optab misalign_optab, int pos_op)
{
  direct_optab optab = (type == ET_unaligned_mem ? misalign_optab : reg_optab);
  enum insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  const struct insn_data_d *data = &insn_data[icode];

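  /* A VOIDmode position operand places no mode restriction on the
     bit position; default to word_mode in that case.  */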
  machine_mode pos_mode = data->operand[pos_op].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = as_a <scalar_int_mode> (mode);
  if (type == ET_unaligned_mem)
    insn->struct_mode = opt_scalar_int_mode ();
  else
    insn->struct_mode = insn->field_mode;
  insn->pos_mode = as_a <scalar_int_mode> (pos_mode);
  return true;
}
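A minimal sketch of how a caller might use this helper to look up a
sign-extending bit-field extraction, modeled on GCC's
get_extraction_insn.  The optab pair and the position-operand number
follow GCC's extv pattern (dest, src, size, pos); treat the exact
identifiers as assumptions here.

static bool
get_extv_insn_sketch (struct extraction_insn *insn,
		      enum extraction_type type, machine_mode mode)
{
  /* In the extv pattern the bit position is operand 3 (an assumption
     here, following the extv operand order).  */
  return get_optab_extraction_insn (insn, type, mode, extv_optab,
				    extv_misalign_optab, 3);
}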
Example #2
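/* Return the insn code used to implement mode MODE of OPTAB, or
   CODE_FOR_nothing if the target has no such insn or the insn is
   unsuitable for optimization type OPT_TYPE.  */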
insn_code
direct_optab_handler (convert_optab optab, machine_mode mode,
		      optimization_type opt_type)
{
  insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing
      || !targetm.optab_supported_p (optab, mode, mode, opt_type))
    return CODE_FOR_nothing;
  return icode;
}
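A minimal sketch of a typical query against this overload, modeled on
direct_optab_supported_p in GCC's internal-fn.c: map a
speed-versus-size flag to an optimization_type and test whether the
optab is usable.  The helper name is hypothetical.

static bool
optab_usable_p_sketch (direct_optab optab, machine_mode mode,
		       bool speed_p)
{
  /* OPTIMIZE_FOR_SPEED and OPTIMIZE_FOR_SIZE are GCC's
     optimization_type values.  */
  optimization_type opt_type
    = speed_p ? OPTIMIZE_FOR_SPEED : OPTIMIZE_FOR_SIZE;
  return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
}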
Example #3
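/* Expand the __cilkrts_detach builtin call whose frame-pointer
   argument is stored in EXP.  */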
void
expand_builtin_cilk_detach (tree exp)
{
  rtx_insn *insn;
  tree fptr = get_frame_arg (exp);

  if (fptr == NULL_TREE)
    return;

  tree parent = cilk_dot (fptr, CILK_TI_FRAME_PARENT, 0);
  tree worker = cilk_dot (fptr, CILK_TI_FRAME_WORKER, 0);
  tree tail = cilk_arrow (worker, CILK_TI_WORKER_TAIL, 1);

  rtx wreg = expand_expr (worker, NULL_RTX, Pmode, EXPAND_NORMAL);
  if (GET_CODE (wreg) != REG)
    wreg = copy_to_reg (wreg);
  rtx preg = expand_expr (parent, NULL_RTX, Pmode, EXPAND_NORMAL);

  /* TMP <- WORKER.TAIL
    *TMP <- PARENT
     TMP <- TMP + 1
     WORKER.TAIL <- TMP   */

  HOST_WIDE_INT worker_tail_offset =
    tree_to_shwi (DECL_FIELD_OFFSET (cilk_trees[CILK_TI_WORKER_TAIL])) +
    tree_to_shwi (DECL_FIELD_BIT_OFFSET (cilk_trees[CILK_TI_WORKER_TAIL])) /
    BITS_PER_UNIT;
  rtx tmem0 = gen_rtx_MEM (Pmode,
			   plus_constant (Pmode, wreg, worker_tail_offset));
  set_mem_attributes (tmem0, tail, 0);
  MEM_NOTRAP_P (tmem0) = 1;
  gcc_assert (MEM_VOLATILE_P (tmem0));
  rtx treg = copy_to_mode_reg (Pmode, tmem0);
  rtx tmem1 = gen_rtx_MEM (Pmode, treg);
  set_mem_attributes (tmem1, TREE_TYPE (TREE_TYPE (tail)), 0);
  MEM_NOTRAP_P (tmem1) = 1;
  emit_move_insn (tmem1, preg);
  emit_move_insn (treg, plus_constant (Pmode, treg, GET_MODE_SIZE (Pmode)));

  /* There is a release barrier (st8.rel, membar #StoreStore,
     sfence, lwsync, etc.) between the two stores.  On x86
     normal volatile stores have proper semantics; the sfence
     would only be needed for nontemporal stores (which we
     could generate using the storent optab, for no benefit
     in this case).

     The predicate may return false even for a REG if this is
     the limited release operation that only stores 0.  */
  enum insn_code icode = direct_optab_handler (sync_lock_release_optab, Pmode);
  if (icode != CODE_FOR_nothing
      && insn_data[icode].operand[1].predicate (treg, Pmode)
      && (insn = GEN_FCN (icode) (tmem0, treg)) != NULL_RTX)
    emit_insn (insn);
  else
    emit_move_insn (tmem0, treg);

  /* The memory barrier inserted above should not prevent
     the load of flags from being moved before the stores,
     but in practice it does because it is implemented with
     unspec_volatile.  In-order RISC machines should
     explicitly load flags earlier.  */

  tree flags = cilk_dot (fptr, CILK_TI_FRAME_FLAGS, 0);
  expand_expr (build2 (MODIFY_EXPR, void_type_node, flags,
		       build2 (BIT_IOR_EXPR, TREE_TYPE (flags), flags,
			       build_int_cst (TREE_TYPE (flags),
					      CILK_FRAME_DETACHED))),
	       const0_rtx, VOIDmode, EXPAND_NORMAL);
}
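For reference, the RTL sequence emitted above corresponds roughly to
the following C-level sketch.  The struct and field names are assumed
from the Cilk runtime's frame and worker layout, not taken from this
file.

void
cilk_detach_sketch (struct __cilkrts_stack_frame *sf)
{
  struct __cilkrts_worker *w = sf->worker;
  struct __cilkrts_stack_frame **tail = w->tail; /* TMP <- WORKER.TAIL */
  *tail = sf->call_parent;                       /* *TMP <- PARENT */
  tail++;                                        /* TMP <- TMP + 1 */
  w->tail = tail;       /* WORKER.TAIL <- TMP, must be a release store */
  sf->flags |= CILK_FRAME_DETACHED;
}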