void spu_cpu_cpp_builtins (struct cpp_reader *pfile) { builtin_define_std ("__SPU__"); cpp_assert (pfile, "cpu=spu"); cpp_assert (pfile, "machine=spu"); if (spu_arch == PROCESSOR_CELLEDP) builtin_define_std ("__SPU_EDP__"); builtin_define_std ("__vector=__attribute__((__spu_vector__))"); if (!flag_iso) { /* Define this when supporting context-sensitive keywords. */ cpp_define (pfile, "__VECTOR_KEYWORD_SUPPORTED__"); cpp_define (pfile, "vector=vector"); /* Initialize vector keywords. */ __vector_keyword = get_identifier ("__vector"); C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL; vector_keyword = get_identifier ("vector"); C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL; /* Enable context-sensitive macros. */ cpp_get_callbacks (pfile)->macro_to_expand = spu_macro_to_expand; } }
void ix86_target_macros (void) { /* 32/64-bit won't change with target specific options, so do the assert and builtin_define_std calls here. */ if (TARGET_64BIT) { cpp_assert (parse_in, "cpu=x86_64"); cpp_assert (parse_in, "machine=x86_64"); cpp_define (parse_in, "__amd64"); cpp_define (parse_in, "__amd64__"); cpp_define (parse_in, "__x86_64"); cpp_define (parse_in, "__x86_64__"); } else { cpp_assert (parse_in, "cpu=i386"); cpp_assert (parse_in, "machine=i386"); builtin_define_std ("i386"); } ix86_target_macros_internal (ix86_isa_flags, ix86_arch, ix86_tune, ix86_fpmath, cpp_define); }
void ix86_target_macros (void) { /* 32/64-bit won't change with target specific options, so do the assert and builtin_define_std calls here. */ if (TARGET_64BIT) { cpp_assert (parse_in, "cpu=x86_64"); cpp_assert (parse_in, "machine=x86_64"); cpp_define (parse_in, "__amd64"); cpp_define (parse_in, "__amd64__"); cpp_define (parse_in, "__x86_64"); cpp_define (parse_in, "__x86_64__"); if (TARGET_X32) { cpp_define (parse_in, "_ILP32"); cpp_define (parse_in, "__ILP32__"); } } else { cpp_assert (parse_in, "cpu=i386"); cpp_assert (parse_in, "machine=i386"); builtin_define_std ("i386"); } cpp_define_formatted (parse_in, "__ATOMIC_HLE_ACQUIRE=%d", IX86_HLE_ACQUIRE); cpp_define_formatted (parse_in, "__ATOMIC_HLE_RELEASE=%d", IX86_HLE_RELEASE); ix86_target_macros_internal (ix86_isa_flags, ix86_arch, ix86_tune, ix86_fpmath, cpp_define); }
void sparc_target_macros (void) { builtin_define_std ("sparc"); if (TARGET_64BIT) { cpp_assert (parse_in, "cpu=sparc64"); cpp_assert (parse_in, "machine=sparc64"); } else { cpp_assert (parse_in, "cpu=sparc"); cpp_assert (parse_in, "machine=sparc"); } if (TARGET_VIS3) { cpp_define (parse_in, "__VIS__=0x300"); cpp_define (parse_in, "__VIS=0x300"); } else if (TARGET_VIS2) { cpp_define (parse_in, "__VIS__=0x200"); cpp_define (parse_in, "__VIS=0x200"); } else if (TARGET_VIS) { cpp_define (parse_in, "__VIS__=0x100"); cpp_define (parse_in, "__VIS=0x100"); } }
void ix86_target_macros (void) { /* 32/64-bit won't change with target specific options, so do the assert and builtin_define_std calls here. */ if (TARGET_64BIT) { cpp_assert (parse_in, "cpu=x86_64"); cpp_assert (parse_in, "machine=x86_64"); cpp_define (parse_in, "__amd64"); cpp_define (parse_in, "__amd64__"); cpp_define (parse_in, "__x86_64"); cpp_define (parse_in, "__x86_64__"); if (TARGET_X32) { cpp_define (parse_in, "_ILP32"); cpp_define (parse_in, "__ILP32__"); } } else { cpp_assert (parse_in, "cpu=i386"); cpp_assert (parse_in, "machine=i386"); builtin_define_std ("i386"); } if (!TARGET_80387) cpp_define (parse_in, "_SOFT_FLOAT"); if (TARGET_LONG_DOUBLE_64) cpp_define (parse_in, "__LONG_DOUBLE_64__"); if (TARGET_LONG_DOUBLE_128) cpp_define (parse_in, "__LONG_DOUBLE_128__"); if (TARGET_128BIT_LONG_DOUBLE) cpp_define (parse_in, "__SIZEOF_FLOAT80__=16"); else cpp_define (parse_in, "__SIZEOF_FLOAT80__=12"); cpp_define (parse_in, "__SIZEOF_FLOAT128__=16"); cpp_define_formatted (parse_in, "__ATOMIC_HLE_ACQUIRE=%d", IX86_HLE_ACQUIRE); cpp_define_formatted (parse_in, "__ATOMIC_HLE_RELEASE=%d", IX86_HLE_RELEASE); cpp_define (parse_in, "__GCC_ASM_FLAG_OUTPUTS__"); ix86_target_macros_internal (ix86_isa_flags, ix86_isa_flags2, ix86_arch, ix86_tune, ix86_fpmath, cpp_define); cpp_define (parse_in, "__SEG_FS"); cpp_define (parse_in, "__SEG_GS"); }
/* Preprocessor setup for the SPU target: cpu/machine assertions plus
   the __SPU__ macro family and the __vector attribute shorthand.

   PFILE is the preprocessor instance to populate.  */
void
spu_cpu_cpp_builtins (struct cpp_reader *pfile)
{
  /* Assertions first, then the macro definitions (order is not
     significant; all entries are independent).  */
  cpp_assert (pfile, "cpu=spu");
  cpp_assert (pfile, "machine=spu");

  builtin_define_std ("__SPU__");
  if (spu_arch == PROCESSOR_CELLEDP)
    builtin_define_std ("__SPU_EDP__");
  builtin_define_std ("__vector=__attribute__((__spu_vector__))");
}
void check(const I& input, const K& kernel, const C& conv) const { static_assert(etl::dimensions<I>() == 2, "Invalid number of dimensions for input of conv2_backward"); static_assert(etl::dimensions<K>() == 2, "Invalid number of dimensions for kernel of conv2_backward"); static_assert(etl::dimensions<C>() == 2, "Invalid number of dimensions for conv of conv2_backward"); cpp_assert(etl::dim(conv, 0) == s1 * (etl::dim(input, 0) - 1) + etl::dim(kernel, 0) - 2 * p1, "Invalid dimensions for conv2_backward"); cpp_assert(etl::dim(conv, 1) == s2 * (etl::dim(input, 1) - 1) + etl::dim(kernel, 1) - 2 * p2, "Invalid dimensions for conv2_backward"); cpp_unused(input); cpp_unused(kernel); cpp_unused(conv); }
/* Define the platform-dependent macros for the s390/s390x target.

   PFILE is the preprocessor instance to populate.  */
void
s390_cpu_cpp_builtins (cpp_reader *pfile)
{
  struct cl_target_option options;

  cpp_assert (pfile, "cpu=s390");
  cpp_assert (pfile, "machine=s390");
  cpp_define (pfile, "__s390__");

  if (TARGET_ZARCH)
    cpp_define (pfile, "__zarch__");
  if (TARGET_64BIT)
    cpp_define (pfile, "__s390x__");
  if (TARGET_LONG_DOUBLE_128)
    cpp_define (pfile, "__LONG_DOUBLE_128__");

  /* Snapshot the current global options so the internal helper can emit
     the option-dependent macros from a cl_target_option.  */
  cl_target_option_save (&options, &global_options);
  s390_cpu_cpp_builtins_internal (pfile, &options, NULL);
}
// Train the network on one mini-batch with SGD: forward pass, error
// computation at the last layer, backpropagation through all layer
// pairs, then a gradient update on every layer.
// data_batch/label_batch must have equal sizes (checked in debug builds).
// NOTE(review): the statement order below (outputs -> last errors ->
// rpair backprop -> first-layer gradients -> apply) is load-bearing;
// each step reads state written by the previous one.
void train_batch(std::size_t /*epoch*/, const dll::batch<T>& data_batch, const dll::batch<L>& label_batch) {
    cpp_assert(data_batch.size() == label_batch.size(), "Invalid sizes");

    // Batch size, used to scale the gradient application below
    auto n = label_batch.size();

    // SGD contexts of the first and last layers of the network
    decltype(auto) first_layer = dbn.template layer_get<0>();
    decltype(auto) first_ctx = first_layer.template get_sgd_context<dbn_t>();
    decltype(auto) last_layer = dbn.template layer_get<layers - 1>();
    decltype(auto) last_ctx = last_layer.template get_sgd_context<dbn_t>();

    // Batch-shaped storage for the first layer's input and the last
    // layer's expected output
    using inputs_t = typename input_batch_t<0>::type;
    using outputs_t = typename output_batch_t<layers - 1>::type;

    inputs_t inputs;
    outputs_t labels;

    //Copy inputs and labels into suitable data structure
    copy_inputs(inputs, data_batch.begin(), data_batch.end());
    copy_labels(labels, label_batch.begin(), label_batch.end());

    //Feedforward pass
    compute_outputs(inputs);

    static_assert(
        decay_layer_traits<decltype(last_layer)>::is_dense_layer() || decay_layer_traits<decltype(last_layer)>::is_standard_rbm_layer(),
        "The last layer must be dense for SGD trainining");

    //Compute the errors of the last layer
    compute_last_errors(last_layer, last_ctx, labels);

    //Compute the gradients of each layer
    // Walk layer pairs in reverse: for each (r1, r2), compute r2's
    // gradients from r1's output, then backpropagate errors into r1.
    dbn.for_each_layer_rpair([](auto& r1, auto& r2) {
        auto& ctx1 = r1.template get_sgd_context<dbn_t>();
        auto& ctx2 = r2.template get_sgd_context<dbn_t>();

        this_type::compute_gradients(r2, ctx2, ctx1.output);

        this_type::compute_errors(r1, ctx1, r2, ctx2);
    });

    // The first layer's gradients come from the raw batch inputs, which
    // the rpair loop above does not see
    compute_gradients(first_layer, first_ctx, inputs);

    //Apply gradients
    dbn.for_each_layer([this, n](auto& layer) {
        this->apply_gradients(layer, n);
    });
}
void spu_cpu_cpp_builtins (struct cpp_reader *pfile) { cpp_define (pfile, "__SPU__"); cpp_assert (pfile, "cpu=spu"); cpp_assert (pfile, "machine=spu"); if (spu_arch == PROCESSOR_CELLEDP) cpp_define (pfile, "__SPU_EDP__"); if (cpp_get_options (pfile)->lang != CLK_ASM) cpp_define (pfile, "__vector=__attribute__((__spu_vector__))"); switch (spu_ea_model) { case 32: cpp_define (pfile, "__EA32__"); break; case 64: cpp_define (pfile, "__EA64__"); break; default: gcc_unreachable (); } if (!flag_iso && cpp_get_options (pfile)->lang != CLK_ASM) { /* Define this when supporting context-sensitive keywords. */ cpp_define (pfile, "__VECTOR_KEYWORD_SUPPORTED__"); cpp_define (pfile, "vector=vector"); /* Initialize vector keywords. */ __vector_keyword = get_identifier ("__vector"); C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL; vector_keyword = get_identifier ("vector"); C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL; /* Enable context-sensitive macros. */ cpp_get_callbacks (pfile)->macro_to_expand = spu_macro_to_expand; } }
/*!
 * \brief Construct a batch from a pair of iterators.
 *
 * \param first Iterator to the beginning of the batch
 * \param last Iterator past the end of the batch
 *
 * Fix: the previous code called std::distance(first, last) on the
 * constructor parameters *after* they had been std::forward'ed into the
 * members in the initializer list — a use-after-move when Iterator is
 * moved from.  The assertion now measures the stored members instead.
 */
batch(Iterator&& first, Iterator&& last)
        : first(std::forward<Iterator>(first)),
          last(std::forward<Iterator>(last)) {
    // The parameters are moved-from at this point; only the members are
    // safe to read.
    cpp_assert(std::distance(this->first, this->last) > 0, "Batch cannot be empty or reversed");
}