void aarch64_init_builtins (void) { tree ftype_set_fpr = build_function_type_list (void_type_node, unsigned_type_node, NULL); tree ftype_get_fpr = build_function_type_list (unsigned_type_node, NULL); aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR] = add_builtin_function ("__builtin_aarch64_get_fpcr", ftype_get_fpr, AARCH64_BUILTIN_GET_FPCR, BUILT_IN_MD, NULL, NULL_TREE); aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR] = add_builtin_function ("__builtin_aarch64_set_fpcr", ftype_set_fpr, AARCH64_BUILTIN_SET_FPCR, BUILT_IN_MD, NULL, NULL_TREE); aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR] = add_builtin_function ("__builtin_aarch64_get_fpsr", ftype_get_fpr, AARCH64_BUILTIN_GET_FPSR, BUILT_IN_MD, NULL, NULL_TREE); aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR] = add_builtin_function ("__builtin_aarch64_set_fpsr", ftype_set_fpr, AARCH64_BUILTIN_SET_FPSR, BUILT_IN_MD, NULL, NULL_TREE); if (TARGET_SIMD) aarch64_init_simd_builtins (); if (TARGET_CRC32) aarch64_init_crc32_builtins (); }
void mchp_init_cci_builtins(void) { #define CCI(TARGET, CCI_KIND, CCI_KEYWORD, TGT_FN, N) \ if (TARGET && CCI_KIND == CCI_pragma) \ c_register_pragma(0, CCI_KEYWORD, TGT_FN); #include CCI_H /* * Special case mapping * */ if (IMPORT_MCHP("iar")) { /* define builtins for the functions that we don't define * and mark them as unsupported */ tree attrib, args; tree fn_type; # define MESSAGE "Intrinsic function is unsupported for this target" args = build_tree_list(NULL_TREE, build_string(strlen(MESSAGE), MESSAGE)); attrib = build_tree_list(get_identifier("target_error"), args); # undef MESSAGE fn_type = build_function_type_list(void_type_node, void_type_node,NULL_TREE); add_builtin_function("__disable_fiq", fn_type, 0, BUILT_IN_MD, NULL, attrib); /* ^ this should be okay * because we are going to generate an error for it... */ add_builtin_function("__disable_irq", fn_type, 0, BUILT_IN_MD, NULL, attrib); /* ^ this should be okay * because we are going to generate an error for it... */ add_builtin_function("__enable_fiq", fn_type, 0, BUILT_IN_MD, NULL, attrib); /* ^ this should be okay * because we are going to generate an error for it... */ add_builtin_function("__enable_irq", fn_type, 0, BUILT_IN_MD, NULL, attrib); /* ^ this should be okay * because we are going to generate an error for it... */ } }
void nds32_init_builtins_impl (void) { tree pointer_type_node = build_pointer_type (integer_type_node); tree void_ftype_void = build_function_type (void_type_node, void_list_node); tree void_ftype_pint = build_function_type_list (void_type_node, pointer_type_node, NULL_TREE); tree int_ftype_int = build_function_type_list (integer_type_node, integer_type_node, NULL_TREE); tree void_ftype_int_int = build_function_type_list (void_type_node, integer_type_node, integer_type_node, NULL_TREE); /* Cache. */ add_builtin_function ("__builtin_nds32_isync", void_ftype_pint, NDS32_BUILTIN_ISYNC, BUILT_IN_MD, NULL, NULL_TREE); add_builtin_function ("__builtin_nds32_isb", void_ftype_void, NDS32_BUILTIN_ISB, BUILT_IN_MD, NULL, NULL_TREE); /* Register Transfer. */ add_builtin_function ("__builtin_nds32_mfsr", int_ftype_int, NDS32_BUILTIN_MFSR, BUILT_IN_MD, NULL, NULL_TREE); add_builtin_function ("__builtin_nds32_mfusr", int_ftype_int, NDS32_BUILTIN_MFUSR, BUILT_IN_MD, NULL, NULL_TREE); add_builtin_function ("__builtin_nds32_mtsr", void_ftype_int_int, NDS32_BUILTIN_MTSR, BUILT_IN_MD, NULL, NULL_TREE); add_builtin_function ("__builtin_nds32_mtusr", void_ftype_int_int, NDS32_BUILTIN_MTUSR, BUILT_IN_MD, NULL, NULL_TREE); /* Interrupt. */ add_builtin_function ("__builtin_nds32_setgie_en", void_ftype_void, NDS32_BUILTIN_SETGIE_EN, BUILT_IN_MD, NULL, NULL_TREE); add_builtin_function ("__builtin_nds32_setgie_dis", void_ftype_void, NDS32_BUILTIN_SETGIE_DIS, BUILT_IN_MD, NULL, NULL_TREE); }
/* Declare the builtin NAME with function type TYPE and code CODE,
   backed by LIBRARY_NAME, then record its call-expression flags ATTR
   and register the decl in the builtin table.  */
static void
gfc_define_builtin (const char *name, tree type,
		    enum built_in_function code,
		    const char *library_name, int attr)
{
  tree fndecl = add_builtin_function (name, type, code, BUILT_IN_NORMAL,
				      library_name, NULL_TREE);
  set_call_expr_flags (fndecl, attr);
  set_builtin_decl (code, fndecl, true);
}
static void aarch64_init_crc32_builtins () { tree usi_type = aarch64_simd_builtin_std_type (SImode, qualifier_unsigned); unsigned int i = 0; for (i = 0; i < ARRAY_SIZE (aarch64_crc_builtin_data); ++i) { aarch64_crc_builtin_datum* d = &aarch64_crc_builtin_data[i]; tree argtype = aarch64_simd_builtin_std_type (d->mode, qualifier_unsigned); tree ftype = build_function_type_list (usi_type, usi_type, argtype, NULL_TREE); tree fndecl = add_builtin_function (d->name, ftype, d->fcode, BUILT_IN_MD, NULL, NULL_TREE); aarch64_builtin_decls[d->fcode] = fndecl; } }
/* Register every AdvSIMD builtin listed in aarch64_simd_builtin_data,
   deriving each builtin's function type from the RTL insn_data operand
   modes combined with the table's qualifier flags.  */
static void
aarch64_init_simd_builtins (void)
{
  unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;

  aarch64_init_simd_builtin_types ();

  /* Strong-typing hasn't been implemented for all AdvSIMD builtin intrinsics.
     Therefore we need to preserve the old __builtin scalar types.  It can be
     removed once all the intrinsics become strongly typed using the qualifier
     system.  */
  aarch64_init_simd_builtin_scalar_types ();

  for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
    {
      bool print_type_signature_p = false;
      char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
      aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
      char namebuf[60];
      tree ftype = NULL;
      tree fndecl = NULL;

      /* Record the function code assigned to this table entry so later
	 expansion can map fcode back to the datum.  */
      d->fcode = fcode;

      /* We must track two variables here.  op_num is
	 the operand number as in the RTL pattern.  This is
	 required to access the mode (e.g. V4SF mode) of the
	 argument, from which the base type can be derived.
	 arg_num is an index in to the qualifiers data, which
	 gives qualifiers to the type (e.g. const unsigned).
	 The reason these two variables may differ by one is the
	 void return type.  While all return types take the 0th entry
	 in the qualifiers array, there is no operand for them in the
	 RTL pattern.  */
      int op_num = insn_data[d->code].n_operands - 1;
      int arg_num = d->qualifiers[0] & qualifier_void
		      ? op_num + 1
		      : op_num;
      tree return_type = void_type_node, args = void_list_node;
      tree eltype;

      /* Build a function type directly from the insn_data for this
	 builtin.  The build_function_type () function takes care of
	 removing duplicates for us.  */
      for (; op_num >= 0; arg_num--, op_num--)
	{
	  machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
	  enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num];

	  /* Record a per-argument signedness/polyness letter; only emit
	     the signature suffix in the name when some argument is not
	     plain signed.  */
	  if (qualifiers & qualifier_unsigned)
	    {
	      type_signature[arg_num] = 'u';
	      print_type_signature_p = true;
	    }
	  else if (qualifiers & qualifier_poly)
	    {
	      type_signature[arg_num] = 'p';
	      print_type_signature_p = true;
	    }
	  else
	    type_signature[arg_num] = 's';

	  /* Skip an internal operand for vget_{low, high}.  */
	  if (qualifiers & qualifier_internal)
	    continue;

	  /* Some builtins have different user-facing types
	     for certain arguments, encoded in d->mode.  */
	  if (qualifiers & qualifier_map_mode)
	      op_mode = d->mode;

	  /* For pointers, we want a pointer to the basic type
	     of the vector.  */
	  if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
	    op_mode = GET_MODE_INNER (op_mode);

	  eltype = aarch64_simd_builtin_type
		     (op_mode,
		      (qualifiers & qualifier_unsigned) != 0,
		      (qualifiers & qualifier_poly) != 0);
	  gcc_assert (eltype != NULL);

	  /* Add qualifiers.  */
	  if (qualifiers & qualifier_const)
	    eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);

	  if (qualifiers & qualifier_pointer)
	      eltype = build_pointer_type (eltype);

	  /* If we have reached arg_num == 0, we are at a non-void
	     return type.  Otherwise, we are still processing
	     arguments.  */
	  if (arg_num == 0)
	    return_type = eltype;
	  else
	    args = tree_cons (NULL_TREE, eltype, args);
	}

      ftype = build_function_type (return_type, args);

      gcc_assert (ftype != NULL);

      if (print_type_signature_p)
	snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s_%s",
		  d->name, type_signature);
      else
	snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s",
		  d->name);

      fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
				     NULL, NULL_TREE);
      aarch64_builtin_decls[fcode] = fndecl;
    }
}
unsigned int tree_ssa_prefetch_arrays (void) { loop_iterator li; struct loop *loop; bool unrolled = false; int todo_flags = 0; if (!HAVE_prefetch /* It is possible to ask compiler for say -mtune=i486 -march=pentium4. -mtune=i486 causes us having PREFETCH_BLOCK 0, since this is part of processor costs and i486 does not have prefetch, but -march=pentium4 causes HAVE_prefetch to be true. Ugh. */ || PREFETCH_BLOCK == 0) return 0; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Prefetching parameters:\n"); fprintf (dump_file, " simultaneous prefetches: %d\n", SIMULTANEOUS_PREFETCHES); fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY); fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK); fprintf (dump_file, " L1 cache size: %d lines, %d kB\n", L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE); fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE); fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE); fprintf (dump_file, "\n"); } initialize_original_copy_tables (); if (!built_in_decls[BUILT_IN_PREFETCH]) { tree type = build_function_type (void_type_node, tree_cons (NULL_TREE, const_ptr_type_node, NULL_TREE)); tree decl = add_builtin_function ("__builtin_prefetch", type, BUILT_IN_PREFETCH, BUILT_IN_NORMAL, NULL, NULL_TREE); DECL_IS_NOVOPS (decl) = true; built_in_decls[BUILT_IN_PREFETCH] = decl; } /* We assume that size of cache line is a power of two, so verify this here. */ gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0); FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Processing loop %d:\n", loop->num); unrolled |= loop_prefetch_arrays (loop); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\n"); } if (unrolled) { scev_reset (); todo_flags |= TODO_cleanup_cfg; } free_original_copy_tables (); return todo_flags; }