void fib(int n, int *res)
{
    if (n < 2) {
        *res = n;
    } else {
        int res1 = 0;
        int res2 = 0;
        {
            int mcc_arg_0 = n - 1;
            int *mcc_arg_1 = &res1;
            {
                _Bool mcc_is_in_final;
                nanos_err_t mcc_err_in_final = nanos_in_final(&mcc_is_in_final);
                if (mcc_err_in_final != NANOS_OK) {
                    nanos_handle_error(mcc_err_in_final);
                }
                if (mcc_is_in_final) {
                    /* Already inside a final task: call the serial version directly */
                    fib_mcc_serial(n - 1, &res1);
                } else {
                    /* Create and submit a work descriptor for the task */
                    {
                        nanos_wd_dyn_props_t nanos_wd_dyn_props;
                        int memo_dimensions[1];
                        struct nanos_args_0_t *ol_args;
                        nanos_err_t err;
                        struct nanos_args_0_t imm_args;
                        static nanos_smp_args_t smp_ol_fib_1_args = {
                            .outline = (void (*)(void *))(void (*)(struct nanos_args_0_t *))&smp_ol_fib_1
                        };
                        static struct nanos_const_wd_definition_1 nanos_wd_const_data = {
                            .base = {
                                .props = {
                                    .mandatory_creation = 0,
                                    .tied = 0,
                                    .clear_chunk = 0,
                                    .reserved0 = 0,
                                    .reserved1 = 0,
                                    .reserved2 = 0,
                                    .reserved3 = 0,
                                    .reserved4 = 0
                                },
                                .data_alignment = __alignof__(struct nanos_args_0_t),
                                .num_copies = 1,
                                .num_devices = 1,
                                .num_dimensions = 1,
                                .description = 0
                            },
                            .devices = {
                                [0] = {
                                    .factory = &nanos_smp_factory,
                                    .arg = &smp_ol_fib_1_args
                                }
                            }
                        };
                        nanos_wd_dyn_props.tie_to = 0;
                        nanos_wd_dyn_props.priority = 0;
                        nanos_wd_dyn_props.flags.is_final = 1;
                        /* Memoization dimensions for this task instance */
                        memo_dimensions[0] = mcc_arg_0 + 1;
                        nanos_wd_dyn_props.memo.num_dimensions = 1;
                        nanos_wd_dyn_props.memo.dimensions = memo_dimensions;
                        ol_args = (struct nanos_args_0_t *)0;
                        nanos_wd_t nanos_wd_ = (void *)0;
                        nanos_copy_data_t *ol_copy_data = (nanos_copy_data_t *)0;
                        nanos_region_dimension_internal_t *ol_copy_dimensions = (nanos_region_dimension_internal_t *)0;
                        /* Try to allocate the work descriptor */
                        err = nanos_create_wd_compact(&nanos_wd_, &nanos_wd_const_data.base, &nanos_wd_dyn_props,
                                                      sizeof(struct nanos_args_0_t), (void **)&ol_args,
                                                      nanos_current_wd(), &ol_copy_data, &ol_copy_dimensions);
                        if (err != NANOS_OK) {
                            nanos_handle_error(err);
                        }
                        /* Output dependence on *mcc_arg_1 (res1) */
                        nanos_region_dimension_t dimensions_0[1] = {
                            [0] = {.size = sizeof(int), .lower_bound = 0, .accessed_length = sizeof(int)}
                        };
                        nanos_data_access_t dependences[1] = {
                            [0] = {.address = (void *)mcc_arg_1,
                                   .flags = {.input = 0, .output = 1, .can_rename = 0, .concurrent = 0, .commutative = 0},
                                   .dimension_count = (short int)1,
                                   .dimensions = dimensions_0,
                                   .offset = 0}
                        };
                        if (nanos_wd_ != (void *)0) {
                            /* A work descriptor was allocated: fill in its arguments and copies, then submit asynchronously */
                            (*ol_args).n = mcc_arg_0;
                            (*ol_args).res = mcc_arg_1;
                            ol_copy_dimensions[0].size = 1 * sizeof(int);
                            ol_copy_dimensions[0].lower_bound = 0 * sizeof(int);
                            ol_copy_dimensions[0].accessed_length = (0 - 0 + 1) * sizeof(int);
                            ol_copy_data[0].sharing = NANOS_SHARED;
                            ol_copy_data[0].address = (void *)mcc_arg_1;
                            ol_copy_data[0].flags.input = 0;
                            ol_copy_data[0].flags.output = 1;
                            ol_copy_data[0].dimension_count = (short int)1;
                            ol_copy_data[0].dimensions = &ol_copy_dimensions[0];
                            ol_copy_data[0].offset = 0;
                            err = nanos_set_translate_function(nanos_wd_,
                                                               (void (*)(void *, nanos_wd_t))nanos_xlate_fun_fibompmemoc_0);
                            if (err != NANOS_OK) {
                                nanos_handle_error(err);
                            }
                            err = nanos_submit(nanos_wd_, 1, dependences, (void *)0);
                            if (err != NANOS_OK) {
                                nanos_handle_error(err);
                            }
                        } else {
                            /* No work descriptor available: run the task immediately */
                            nanos_region_dimension_internal_t imm_copy_dimensions[1];
                            nanos_copy_data_t imm_copy_data[1];
                            imm_args.n = mcc_arg_0;
                            imm_args.res = mcc_arg_1;
                            imm_copy_dimensions[0].size = 1 * sizeof(int);
                            imm_copy_dimensions[0].lower_bound = 0 * sizeof(int);
                            imm_copy_dimensions[0].accessed_length = (0 - 0 + 1) * sizeof(int);
                            imm_copy_data[0].sharing = NANOS_SHARED;
                            imm_copy_data[0].address = (void *)mcc_arg_1;
                            imm_copy_data[0].flags.input = 0;
                            imm_copy_data[0].flags.output = 1;
                            imm_copy_data[0].dimension_count = (short int)1;
                            imm_copy_data[0].dimensions = &imm_copy_dimensions[0];
                            imm_copy_data[0].offset = 0;
                            err = nanos_create_wd_and_run_compact(&nanos_wd_const_data.base, &nanos_wd_dyn_props,
                                                                  sizeof(struct nanos_args_0_t), &imm_args, 1, dependences,
                                                                  imm_copy_data, imm_copy_dimensions,
                                                                  (void (*)(void *, nanos_wd_t))nanos_xlate_fun_fibompmemoc_0);
                            if (err != NANOS_OK) {
                                nanos_handle_error(err);
                            }
                        }
                    }
                }
            }
        }
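/*
 * For orientation, a minimal sketch (an assumption, not taken from the
 * generated listing above) of the kind of OmpSs-annotated source that a
 * Mercurium-style transformation of this shape typically starts from.
 * The memoization dimensions set through nanos_wd_dyn_props.memo and the
 * is_final flag correspond to additional task clauses whose source-level
 * spelling is not shown here, so they are omitted from the sketch.
 */
void fib_sketch(int n, int *res)
{
    if (n < 2) {
        *res = n;
    } else {
        int res1 = 0;
        int res2 = 0;
        #pragma omp task out(res1)   /* expands into runtime calls like the ones above */
        fib_sketch(n - 1, &res1);
        #pragma omp task out(res2)
        fib_sketch(n - 2, &res2);
        #pragma omp taskwait
        *res = res1 + res2;
    }
}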
void NANOS_task(void (*func)(void *), void *data, long data_size,
                long (*get_data_align)(void), void *empty_data,
                void (*init_func)(void *, void *), bool if_clause, unsigned untied,
                int num_deps, int *deps_dir, void **deps_data, int *deps_n_dims,
                nanos_region_dimension_t **deps_dims, long int *deps_offset)
{
    nanos_err_t err;
    bool nanos_is_in_final;
    err = nanos_in_final(&nanos_is_in_final);
    if (nanos_is_in_final) {
        // Already inside a final task: execute the task code directly
        (*func)(data);
    } else {
        // Compute copy data (SMP devices need no copies; only the CUDA device requires copy data)
        int num_copies = 0;
        // TODO Compute dimensions (for devices other than SMP)
        int num_dimensions = 0;
        // Compute device descriptor (at the moment, only SMP is supported)
        int num_devices = 1;

        // Compute dependencies
        const unsigned int num_data_accesses = num_deps;
        nanos_data_access_t dependences[num_data_accesses];
        int i;
        for (i = 0; i < num_data_accesses; ++i) {
            int in = (deps_dir[i] & (e_dep_dir_in | e_dep_dir_inout));
            int out = (deps_dir[i] & (e_dep_dir_out | e_dep_dir_inout));
            nanos_access_type_internal_t flags = {
                (in != 0),   // input
                (out != 0),  // output
                0,           // can rename
                0,           // concurrent
                0,           // commutative
            };
            nanos_data_access_t dep = {
                deps_data[i], flags, deps_n_dims[i], deps_dims[i], deps_offset[i]
            };
            dependences[i] = dep;
        }

        // Create the device descriptor (at the moment, only SMP is supported)
        nanos_smp_args_t _smp_args = { func };
        char *task_name;
        asprintf(&task_name, "task_%d", task_id++);
        struct nanos_const_wd_definition nanos_wd_const_data = {
            {
                {
                    0,          // mandatory creation
                    !untied,    // tied
                    0, 0, 0, 0, 0, 0
                },                          // properties
                (*get_data_align)(),        // data alignment
                num_copies,
                num_devices,
                num_dimensions,
                task_name                   // description
            },
            {
                {
                    &nanos_smp_factory,     // device description
                    &_smp_args              // outlined function
                }
            }
        };

        // Compute the dynamic properties of the WD: tiedness, priority and finality
        nanos_wd_dyn_props_t dyn_props;
        dyn_props.tie_to = 0;
        dyn_props.priority = 0;
        dyn_props.flags.is_final = 0;

        // Create the WD
        nanos_wd_t wd = (nanos_wd_t)0;
        err = nanos_create_wd_compact(&wd, &nanos_wd_const_data.base, &dyn_props, data_size,
                                      (void **)&empty_data, nanos_current_wd(),
                                      (nanos_copy_data_t **)0, (nanos_region_dimension_internal_t **)0);
        if (err != NANOS_OK)
            nanos_handle_error(err);

        if (wd != (void *)0) {
            // Submit the task to the current working group
            // Initialize the outlined data first
            (*init_func)(empty_data, data);
            err = nanos_submit(wd, num_data_accesses, dependences, (void *)0);
            if (err != NANOS_OK)
                nanos_handle_error(err);
        } else {
            // The task must be run immediately
            err = nanos_create_wd_and_run_compact(&nanos_wd_const_data.base, &dyn_props, data_size,
                                                  data, num_data_accesses, dependences,
                                                  (nanos_copy_data_t *)0,
                                                  (nanos_region_dimension_internal_t *)0,
                                                  (void (*)(void *, void *))0);
            if (err != NANOS_OK)
                nanos_handle_error(err);
        }
    }
}
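/*
 * A hypothetical caller sketch (not part of the listing above) showing how
 * compiler-generated code could route a single task, e.g. "fib(n - 1, &res1)"
 * with one output dependence on res1, through NANOS_task.  All struct, helper
 * and variable names below are illustrative assumptions; only NANOS_task, the
 * e_dep_dir_* values and nanos_region_dimension_t come from the code above.
 */
struct fib_task_args { int n; int *res; };

static void fib_task_outline(void *p)
{
    struct fib_task_args *a = (struct fib_task_args *) p;
    fib(a->n, a->res);
}

static long fib_task_align(void)
{
    return __alignof__(struct fib_task_args);
}

static void fib_task_init(void *dst, void *src)
{
    /* Copy the captured arguments into the runtime-allocated data block */
    *(struct fib_task_args *) dst = *(struct fib_task_args *) src;
}

static void spawn_fib_task(int n, int *res1)
{
    struct fib_task_args args = { n - 1, res1 };
    int dir[1] = { e_dep_dir_out };           /* one output dependence */
    void *dep_data[1] = { res1 };
    int dep_n_dims[1] = { 1 };
    nanos_region_dimension_t dim0[1] = {
        { .size = sizeof(int), .lower_bound = 0, .accessed_length = sizeof(int) }
    };
    nanos_region_dimension_t *dep_dims[1] = { dim0 };
    long dep_off[1] = { 0 };

    /* empty_data starts as NULL; nanos_create_wd_compact fills it in inside NANOS_task */
    NANOS_task(fib_task_outline, &args, sizeof(args), fib_task_align,
               (void *) 0, fib_task_init, 1 /* if clause */, 0 /* tied */,
               1, dir, dep_data, dep_n_dims, dep_dims, dep_off);
}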