Example #1
void NANOS_parallel_end( void )
{
    nanos_err_t err = nanos_omp_barrier( );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    err = nanos_leave_team( );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}
Example #2
void NANOS_parallel_init( void )
{
    nanos_err_t err = nanos_omp_set_implicit( nanos_current_wd( ) );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    err = nanos_enter_team( );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}
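Taken together, the two hooks above bracket the implicit task of a parallel region. A minimal sketch of the outline a translator could emit around the user code (the function name and body are hypothetical):

/* Hypothetical outlined body of a parallel region: each thread registers
   with the team, runs the user code, then synchronizes and leaves via the
   hooks above. */
static void parallel_body( void * data )
{
    NANOS_parallel_init( );
    /* ... user code of the parallel region, operating on 'data' ... */
    NANOS_parallel_end( );
}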
Example #3
void NANOS_taskwait( void )
{
    void * wg = nanos_current_wd( );
    nanos_err_t err = nanos_wg_wait_completion( wg, 0 );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}
Example #4
bool NANOS_single( void )
{
    bool single_guard;
  
    nanos_err_t err = nanos_single_guard( &single_guard );
    if( err != NANOS_OK )
        nanos_handle_error(err);

    return single_guard;
}
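A sketch of how a translated "#pragma omp single" could use this guard; the trailing barrier stands in for the implicit barrier that closes the construct (an assumption, not shown in the source):

/* Only the thread that wins the guard executes the body. */
if( NANOS_single( ) )
{
    /* body of the single construct */
}
/* Assumed implicit barrier closing the construct. */
nanos_err_t err = nanos_omp_barrier( );
if( err != NANOS_OK )
    nanos_handle_error( err );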
Example #5
static void nanos_xlate_fun_fibompmemoc_1(struct nanos_args_1_t *const arg, void *wd)
{
  {
    void *device_base_address;
    nanos_err_t err;
    device_base_address = 0;
    err = nanos_get_addr(0, &device_base_address, wd);
    if (err != NANOS_OK)
      {
        nanos_handle_error(err);
      }
    (*arg).res = (int *)device_base_address;
  }
}
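The translate function above rewrites a copied pointer to its device base address before the outline runs; it is registered on a work descriptor with nanos_set_translate_function, as in the fib example below. A sketch of the argument structure it assumes (the real struct nanos_args_1_t of this translation unit is not shown in the source):

/* Assumed layout: only the 'res' field is visible in the code above. */
struct nanos_args_1_t
{
  int *res;    /* set to the translated device base address */
};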
Example #6
void NANOS_sections( void ( * func ) ( void * section_data, nanos_ws_desc_t * wsd ), void * data,
                     long data_size, long ( * get_data_align )( void ), void * empty_data, void ( * init_func ) ( void *, void * ),
                     int n_sections, bool wait )
{
    // Get scheduling policy
    void * ws_policy = nanos_omp_find_worksharing( omp_sched_static );
    if( ws_policy == 0 )
        nanos_handle_error( NANOS_UNIMPLEMENTED );
    
    char * sections_name; 
    asprintf( &sections_name, "sections_%d", sections_id++ );
    NANOS_worksharing( /*lb*/ 0, /*ub*/ n_sections - 1, /*step*/ 1 , /*chunk*/ 1, sections_name,
                       func, data, data_size, get_data_align, empty_data, init_func, 
                       ws_policy, wait );
}
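The description string relies on a file-scope counter. A sketch of the counters that this and the following examples assume (their definitions are not shown in the source):

/* Assumed file-scope counters used to build unique construct descriptions. */
static int sections_id = 0;
static int loop_id     = 0;
static int parallel_id = 0;
static int task_id     = 0;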
Example #7
void NANOS_loop( void ( * func ) ( void * loop_data, nanos_ws_desc_t * wsd ), void * data, long data_size, long ( * get_data_align )( void ),
                 void* empty_data, void ( * init_func ) ( void *, void * ), int policy,
                 int lower_bound, int upper_bound, int step, int chunk, bool wait )
{
    // Get scheduling policy
    void * ws_policy = nanos_omp_find_worksharing( nanos_get_scheduling( policy ) );
    if( ws_policy == 0 )
        nanos_handle_error( NANOS_UNIMPLEMENTED );

    char * loop_name; 
    asprintf( &loop_name, "loop_%d", loop_id++ );
    NANOS_worksharing( lower_bound, upper_bound, step , chunk, loop_name,
                       func, data, data_size, get_data_align, empty_data, init_func, 
                       ws_policy, wait );
}
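NANOS_loop first maps its integer policy argument to an omp_sched_t value before looking up the worksharing plugin. A minimal sketch of that mapper, assuming the standard omp_sched_t constants (nanos_get_scheduling itself is not shown in the source, so the integer encoding here is hypothetical):

/* Hypothetical policy mapper for nanos_omp_find_worksharing. */
static omp_sched_t nanos_get_scheduling( int policy )
{
    switch( policy )
    {
        case 1:  return omp_sched_dynamic;
        case 2:  return omp_sched_guided;
        case 3:  return omp_sched_auto;
        default: return omp_sched_static;
    }
}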
Example #8
void fib(int n, int *res)
{
  if (n < 2)
    {
      *res = n;
    }
  else
    {
      int res1 = 0;
      int res2 = 0;
      {
        int mcc_arg_0 = n - 1;
        int *mcc_arg_1 = &res1;
        {
          _Bool mcc_is_in_final;
          nanos_err_t mcc_err_in_final = nanos_in_final(&mcc_is_in_final);
          if (mcc_err_in_final != NANOS_OK)
            {
              nanos_handle_error(mcc_err_in_final);
            }
          if (mcc_is_in_final)
            {
              fib_mcc_serial(n - 1, &res1);
            }
          else
            {
              {
                nanos_wd_dyn_props_t nanos_wd_dyn_props;
                int memo_dimensions[1];
                struct nanos_args_0_t *ol_args;
                nanos_err_t err;
                struct nanos_args_0_t imm_args;
                static nanos_smp_args_t smp_ol_fib_1_args = {.outline = (void (*)(void *))(void (*)(struct nanos_args_0_t *))&smp_ol_fib_1};
                static struct nanos_const_wd_definition_1 nanos_wd_const_data = {.base = {.props = {.mandatory_creation = 0, .tied = 0, .clear_chunk = 0, .reserved0 = 0, .reserved1 = 0, .reserved2 = 0, .reserved3 = 0, .reserved4 = 0}, .data_alignment = __alignof__(struct nanos_args_0_t), .num_copies = 1, .num_devices = 1, .num_dimensions = 1, .description = 0}, .devices = {[0] = {.factory = &nanos_smp_factory, .arg = &smp_ol_fib_1_args}}};
                nanos_wd_dyn_props.tie_to = 0;
                nanos_wd_dyn_props.priority = 0;
                nanos_wd_dyn_props.flags.is_final = 1;
                memo_dimensions[0] = mcc_arg_0 + 1;
                nanos_wd_dyn_props.memo.num_dimensions = 1;
                nanos_wd_dyn_props.memo.dimensions = memo_dimensions;
                ol_args = (struct nanos_args_0_t *)0;
                nanos_wd_t nanos_wd_ = (void *)0;
                nanos_copy_data_t *ol_copy_data = (nanos_copy_data_t *)0;
                nanos_region_dimension_internal_t *ol_copy_dimensions = (nanos_region_dimension_internal_t *)0;
                err = nanos_create_wd_compact(&nanos_wd_, &nanos_wd_const_data.base, &nanos_wd_dyn_props, sizeof(struct nanos_args_0_t), (void **)&ol_args, nanos_current_wd(), &ol_copy_data, &ol_copy_dimensions);
                if (err != NANOS_OK)
                  {
                    nanos_handle_error(err);
                  }
                nanos_region_dimension_t dimensions_0[1] = {[0] = {.size = sizeof(int), .lower_bound = 0, .accessed_length = sizeof(int)}};
                nanos_data_access_t dependences[1] = {[0] = {.address = (void *)mcc_arg_1, .flags = {.input = 0, .output = 1, .can_rename = 0, .concurrent = 0, .commutative = 0}, .dimension_count = (short int)1, .dimensions = dimensions_0, .offset = 0}};
                ;
                if (nanos_wd_ != (void *)0)
                  {
                    (*ol_args).n = mcc_arg_0;
                    (*ol_args).res = mcc_arg_1;
                    ol_copy_dimensions[0].size = 1 * sizeof(int);
                    ol_copy_dimensions[0].lower_bound = 0 * sizeof(int);
                    ol_copy_dimensions[0].accessed_length = (0 - 0 + 1) * sizeof(int);
                    ol_copy_data[0].sharing = NANOS_SHARED;
                    ol_copy_data[0].address = (void *)mcc_arg_1;
                    ol_copy_data[0].flags.input = 0;
                    ol_copy_data[0].flags.output = 1;
                    ol_copy_data[0].dimension_count = (short int)1;
                    ol_copy_data[0].dimensions = &ol_copy_dimensions[0];
                    ol_copy_data[0].offset = 0;
                    err = nanos_set_translate_function(nanos_wd_, (void (*)(void *, nanos_wd_t))nanos_xlate_fun_fibompmemoc_0);
                    if (err != NANOS_OK)
                      {
                        nanos_handle_error(err);
                      }
                    err = nanos_submit(nanos_wd_, 1, dependences, (void *)0);
                    if (err != NANOS_OK)
                      {
                        nanos_handle_error(err);
                      }
                  }
                else
                  {
                    nanos_region_dimension_internal_t imm_copy_dimensions[1];
                    nanos_copy_data_t imm_copy_data[1];
                    imm_args.n = mcc_arg_0;
                    imm_args.res = mcc_arg_1;
                    imm_copy_dimensions[0].size = 1 * sizeof(int);
                    imm_copy_dimensions[0].lower_bound = 0 * sizeof(int);
                    imm_copy_dimensions[0].accessed_length = (0 - 0 + 1) * sizeof(int);
                    imm_copy_data[0].sharing = NANOS_SHARED;
                    imm_copy_data[0].address = (void *)mcc_arg_1;
                    imm_copy_data[0].flags.input = 0;
                    imm_copy_data[0].flags.output = 1;
                    imm_copy_data[0].dimension_count = (short int)1;
                    imm_copy_data[0].dimensions = &imm_copy_dimensions[0];
                    imm_copy_data[0].offset = 0;
                    err = nanos_create_wd_and_run_compact(&nanos_wd_const_data.base, &nanos_wd_dyn_props, sizeof(struct nanos_args_0_t), &imm_args, 1, dependences, imm_copy_data, imm_copy_dimensions, (void (*)(void *, nanos_wd_t))nanos_xlate_fun_fibompmemoc_0);
                    if (err != NANOS_OK)
                      {
                        nanos_handle_error(err);
                      }
                  }
              }
            }
        }
      }
      /* The symmetric task computing res2 on n - 2, the taskwait, and the
         final combination (*res = res1 + res2) are omitted here; the source
         is truncated, and they mirror the res1 task above. */
    }
}
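The device-specific outline referenced by smp_ol_fib_1_args unpacks the argument structure and performs the recursive call. A sketch under that assumption (the real outline is not shown in the source):

/* Hypothetical SMP outline for the task created above. */
static void smp_ol_fib_1(struct nanos_args_0_t *const args)
{
  fib((*args).n, (*args).res);
}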
Example #9
int main(int argc, char * argv[])
{
    int * * l_array_of_arrays;
    int * l_partial_sums;
    int l_num_procs;
    int l_total;
    int l_i, l_j;
    if (argc != 2)
    {
        printf("Usage: %s number_of_processors\n", argv[0]);
        return 0;
    }
    l_num_procs = atoi(argv[1]);
    if (l_num_procs < 1 || l_num_procs > 16)
    {
        printf("The number of processors must be between 1 and 16\n");
        return 0;
    }
    l_partial_sums = (int *) malloc(l_num_procs * sizeof(int));
    l_array_of_arrays = (int **) malloc(l_num_procs * sizeof(int *));
    for (l_i = 0;
        l_i < l_num_procs;
        l_i++)
    {
        l_array_of_arrays[l_i] = (int *) malloc(16834 * sizeof(int));
        for (l_j = 0;
            l_j < 16834;
            l_j++)
        {
            if ((l_j % 2) == 0)
                l_array_of_arrays[l_i][l_j] = 1;
            else
                l_array_of_arrays[l_i][l_j] = 0;
        }
    }
    for (l_i = 0;
        l_i < l_num_procs;
        l_i++)
    {
        {
            nanos_smp_args_t _ol_main_0_smp_args = {
                (void (*)(void *)) _smp__ol_main_0
            };
            _nx_data_env_0_t * ol_args = (_nx_data_env_0_t *) 0;
            nanos_wd_t wd = (nanos_wd_t) 0;
            const_data1.data_alignment = __alignof__(_nx_data_env_0_t);
            const_data1.devices[0].arg = &_ol_main_0_smp_args;
            nanos_wd_dyn_props_t dyn_data1 = { 0 };
            nanos_err_t err;
            err = nanos_create_wd_compact(&wd, (nanos_const_wd_definition_t *) &const_data1, &dyn_data1, sizeof(_nx_data_env_0_t), (void **) &ol_args, nanos_current_wd(), (nanos_copy_data_t **) 0, NULL);
            if (err != NANOS_OK)
                nanos_handle_error(err);
            if (wd != (nanos_wd_t) 0)
            {
                ol_args->l_array_of_arrays_0 = l_array_of_arrays;
                ol_args->l_partial_sums_0 = l_partial_sums;
                ol_args->l_i_0 = l_i;
                err = nanos_submit(wd, 0, (nanos_data_access_t *) 0, (nanos_team_t) 0);
                if (err != NANOS_OK)
                    nanos_handle_error(err);
            }
            else
            {
                _nx_data_env_0_t imm_args;
                imm_args.l_array_of_arrays_0 = l_array_of_arrays;
                imm_args.l_partial_sums_0 = l_partial_sums;
                imm_args.l_i_0 = l_i;
                err = nanos_create_wd_and_run_compact((nanos_const_wd_definition_t *) &const_data1, &dyn_data1,  sizeof(_nx_data_env_0_t),
                       &imm_args, 0, (nanos_data_access_t *) 0, (nanos_copy_data_t *) 0, 0, NULL);
                if (err != NANOS_OK)
                    nanos_handle_error(err);
            }
        }
    }
    {
        nanos_err_t err = nanos_wg_wait_completion( nanos_current_wd(), 0 );
        if (err != NANOS_OK)
            nanos_handle_error(err);
    }
    l_total = 0;
    for (l_i = 0;
        l_i < l_num_procs;
        l_i++)
    {
        printf("%d -> %d\n", l_i, l_partial_sums[l_i]);
        l_total += l_partial_sums[l_i];
    }
    printf("Result = %d\n", l_total);
    return 0;
}
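A sketch of the data environment the loop above captures for each work descriptor, reconstructed from the field assignments (the real definition, like const_data1, is not shown in the source):

/* Hypothetical data environment matching the assignments above. */
typedef struct
{
    int * * l_array_of_arrays_0;
    int * l_partial_sums_0;
    int l_i_0;
} _nx_data_env_0_t;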
Example #10
int main(int argc, char **argv)
{
  int i;
  int a[16];
  int it;
  int rv = 0;
  for (i = 0; i < 16; i++)
    {
      a[i] = 0;
    }
  for (it = 0; it < 4; it++)
    {
      {
        nanos_err_t nanos_err;
        nanos_wd_dyn_props_t dyn_props;
        unsigned int nth_i;
        struct nanos_args_1_t imm_args;
        nanos_data_access_t dependences[1];
        static nanos_smp_args_t smp_ol_main_1_args = {.outline = (void (*)(void *))(void (*)(struct nanos_args_1_t *))&smp_ol_main_1};
        static struct nanos_const_wd_definition_1 nanos_wd_const_data = {.base = {.props = {.mandatory_creation = 1, .tied = 1, .clear_chunk = 0, .reserved0 = 0, .reserved1 = 0, .reserved2 = 0, .reserved3 = 0, .reserved4 = 0}, .data_alignment = __alignof__(struct nanos_args_1_t), .num_copies = 0, .num_devices = 1, .num_dimensions = 0, .description = 0}, .devices = {[0] = {.factory = &nanos_smp_factory, .arg = &smp_ol_main_1_args}}};
        unsigned int nanos_num_threads = nanos_omp_get_num_threads_next_parallel(0);
        nanos_team_t nanos_team = (void *)0;
        nanos_thread_t nanos_team_threads[nanos_num_threads];
        nanos_err = nanos_create_team(&nanos_team, (void *)0, &nanos_num_threads, (nanos_constraint_t *)0, 1, nanos_team_threads, &nanos_wd_const_data.base);
        if (nanos_err != NANOS_OK)
          {
            nanos_handle_error(nanos_err);
          }
        dyn_props.tie_to = (void *)0;
        dyn_props.priority = 0;
        dyn_props.flags.is_final = 0;
        for (nth_i = 1; nth_i < nanos_num_threads; nth_i = nth_i + 1)
          {
            dyn_props.tie_to = nanos_team_threads[nth_i];
            struct nanos_args_1_t *ol_args = 0;
            nanos_wd_t nanos_wd_ = (void *)0;
            nanos_err = nanos_create_wd_compact(&nanos_wd_, &nanos_wd_const_data.base, &dyn_props, sizeof(struct nanos_args_1_t), (void **)&ol_args, nanos_current_wd(), (nanos_copy_data_t **)0, (nanos_region_dimension_internal_t **)0);
            if (nanos_err != NANOS_OK)
              {
                nanos_handle_error(nanos_err);
              }
            (*ol_args).i = &i;
            (*ol_args).a = &a;
            nanos_err = nanos_submit(nanos_wd_, 0, (nanos_data_access_t *)0, (void *)0);
            if (nanos_err != NANOS_OK)
              {
                nanos_handle_error(nanos_err);
              }
          }
        dyn_props.tie_to = nanos_team_threads[0];
        imm_args.i = &i;
        imm_args.a = &a;
        nanos_err = nanos_create_wd_and_run_compact(&nanos_wd_const_data.base, &dyn_props, sizeof(struct nanos_args_1_t), &imm_args, 0, dependences, (nanos_copy_data_t *)0, (nanos_region_dimension_internal_t *)0, (void (*)(void *, nanos_wd_t))0);
        if (nanos_err != NANOS_OK)
          {
            nanos_handle_error(nanos_err);
          }
        nanos_err = nanos_end_team(nanos_team);
        if (nanos_err != NANOS_OK)
          {
            nanos_handle_error(nanos_err);
          }
      }
    }
  /* The remainder of main is truncated in the source. */
  return rv;
}
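A sketch of the argument structure captured by the parallel spawn above, reconstructed from the assignments to ol_args and imm_args (not shown in the source):

/* Hypothetical argument structure: the induction variable and the
   16-element array are captured by reference. */
struct nanos_args_1_t
{
  int *i;
  int (*a)[16];
};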
Example #11
void NANOS_critical_end( void )
{
    nanos_err_t err = nanos_unset_lock( &nanos_default_critical_lock );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}
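The matching entry hook presumably takes the same global lock. A sketch assuming nanos_set_lock, the counterpart of the nanos_unset_lock call above (the entry hook itself is not shown in the source):

/* Hypothetical counterpart of NANOS_critical_end. */
void NANOS_critical_start( void )
{
    nanos_err_t err = nanos_set_lock( &nanos_default_critical_lock );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}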
Example #12
void NANOS_parallel( void ( * func ) ( void * ), void * data, unsigned numThreads, long data_size, long ( *get_data_align )( void ), 
                     void * ( * get_empty_data )( void ), void ( * init_func ) ( void *, void * ) )
{
    nanos_err_t err;
    
    // Compute copy data (For SMP devices there are no copies. Just CUDA device requires copy data)
    int num_copies = 0;
    // TODO Compute dimensions
    int num_dimensions = 0;
    // Compute device descriptor (at the moment, only SMP is supported)
    int num_devices = 1;
    // TODO No dependencies for parallel construct in SMP devices
    int num_data_accesses = 0;
    nanos_data_access_t dependences[1];
  
    // Create the Device descriptor (at the moment, only SMP is supported)
    nanos_smp_args_t _smp_args = { func };
    char * parallel_name; 
    asprintf( &parallel_name, "parallel_%d", parallel_id++ );
    struct nanos_const_wd_definition nanos_wd_const_data = {
        { { 1,          // mandatory creation
            1,          // tied
            0, 0, 0, 0, 0, 0 },                     // properties 
            ( *get_data_align )( ),                 // data alignment
            num_copies, num_devices, num_dimensions,                            
            parallel_name                           // description
        }, 
        { { &nanos_smp_factory,                     // device description
            &_smp_args }                            // outlined function
        }
    };

    // Compute properties of the WD: mandatory creation, priority, tiedness, real-time info and copy declarations
    nanos_wd_dyn_props_t dyn_props;
    dyn_props.tie_to = ( void * ) 0;
    dyn_props.priority = 0;
    dyn_props.flags.is_final = 0;

    // Create the working team
    if( numThreads == 0 )
        numThreads = nanos_omp_get_num_threads_next_parallel( 0 );
    void * nanos_team = ( void * ) 0;
    const unsigned int nthreads_vla = numThreads;
    void * team_threads[nthreads_vla];
    err = nanos_create_team( &nanos_team, ( void * ) 0, &numThreads,
                             (nanos_constraint_t *) 0, /*reuse current*/ 1, team_threads );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    
    // Create a wd tied to each thread
    unsigned nth_i;
    for( nth_i = 1; nth_i < numThreads; nth_i++ )
    {
        // Set properties to the current wd of the team
        dyn_props.tie_to = team_threads[nth_i];
        
        // Create the current WD of the team
        void * empty_data = ( *get_empty_data )( );
        void * wd = ( void * ) 0;
        err = nanos_create_wd_compact( &wd, &nanos_wd_const_data.base, &dyn_props, 
                                       data_size, ( void** ) &empty_data, 
                                       nanos_current_wd( ), ( nanos_copy_data_t ** ) 0, 
                                       ( nanos_region_dimension_internal_t ** ) 0 );
        if (err != NANOS_OK) 
            nanos_handle_error(err);
        
        // Initialize outlined data
        ( *init_func )( empty_data, data );
    
        // Submit work to the WD
        err = nanos_submit( wd, num_data_accesses, ( nanos_data_access_t * ) 0, ( void * ) 0 );
        if( err != NANOS_OK ) 
            nanos_handle_error( err );
    }

    // Create the wd for the master thread, which will run the team
    dyn_props.tie_to = team_threads[0];
    err = nanos_create_wd_and_run_compact( &nanos_wd_const_data.base, &dyn_props, data_size, data, 
                                           num_data_accesses, dependences, ( nanos_copy_data_t * ) 0,
                                           ( nanos_region_dimension_internal_t * ) 0, 
                                           ( void ( * )( void *, void * ) ) 0 );
    if( err != NANOS_OK )
        nanos_handle_error( err );

    // End the team
    err = nanos_end_team( nanos_team );
    if( err != NANOS_OK )
        nanos_handle_error( err );
}
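A minimal sketch of a call site for NANOS_parallel; the data structure and helper functions are hypothetical stand-ins for what a translator would emit:

/* Hypothetical captured data for a parallel region. */
struct my_parallel_data_t { int * counter; };

static long my_data_align( void ) { return __alignof__( struct my_parallel_data_t ); }
/* The runtime allocates the per-WD storage, so returning 0 here suffices
   (assumption based on how empty_data is overwritten above). */
static void * my_empty_data( void ) { return ( void * ) 0; }
static void my_init_data( void * dst, void * src )
{
    *( struct my_parallel_data_t * ) dst = *( struct my_parallel_data_t * ) src;
}
static void my_region_body( void * data )
{
    /* user code of the parallel region */
}

static void spawn_region( int * counter )
{
    struct my_parallel_data_t pd = { counter };
    /* numThreads == 0 lets the runtime pick the team size. */
    NANOS_parallel( my_region_body, &pd, 0, sizeof( pd ),
                    my_data_align, my_empty_data, my_init_data );
}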
Example #13
void NANOS_reduction( int n_reductions,
                      void ( ** all_threads_reduction )( void * out, void * in, int num_scalars ),
                      void ( * func )( void * data, /*void** globals, */nanos_ws_desc_t * wsd ), void * data,
                      void ( ** copy_back )( int team_size, void * original, void * privates ),
                      void ( ** set_privates )( void * nanos_private, void ** global_data, int reduction_id, int thread ),
                      void ** global_th_data, void ** global_data, long * global_data_size,
                      nanos_ws_desc_t * wsd, const char * filename, int fileline )
{
    nanos_err_t err;
    
    err = nanos_omp_set_implicit( nanos_current_wd( ) );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    
    bool red_single_guard;
    err = nanos_enter_sync_init( &red_single_guard );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    
    nanos_reduction_t* result[n_reductions];

    int nanos_n_threads = nanos_omp_get_num_threads( );
//     void * _global_[ nanos_n_threads ];

    if( red_single_guard )
    {
        int i;
        for( i = 0; i < n_reductions; i++ )
        {
            err = nanos_malloc( ( void ** ) &result[i], sizeof( nanos_reduction_t ), filename, fileline );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            ( * ( result[i] ) ).original = global_data[i];
            err = nanos_malloc( &( * ( result[i] ) ).privates, global_data_size[i] * nanos_n_threads, filename, fileline );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            ( * ( result[i] ) ).descriptor = ( * ( result[i] ) ).privates;       // Fortran only
//             _global_[i] = (int *)( * ( result[i] ) ).privates;
            ( * ( result[i] ) ).vop = copy_back[i];
            ( * ( result[i] ) ).bop = all_threads_reduction[i];
            ( * ( result[i] ) ).element_size = global_data_size[i];
            ( * ( result[i] ) ).num_scalars = 1;
            ( * ( result[i] ) ).cleanup = nanos_free0;
            err = nanos_register_reduction( result[i] );
            if( err != NANOS_OK )
                nanos_handle_error( err );
        }
        
        err = nanos_release_sync_init( );
        if( err != NANOS_OK )
            nanos_handle_error( err );
    }
    else
    {
        err = nanos_wait_sync_init( );
        if( err != NANOS_OK )
            nanos_handle_error( err );
        
        int i;
        for( i = 0; i < n_reductions; i++ )
        {
            err = nanos_reduction_get( & ( result[i] ), global_data[i] );
            if( err != NANOS_OK )
                nanos_handle_error( err );
//             _global_[i] = (int *)( * ( result[i] ) ).privates;
        }
    }
    
    // Execute the function containing the reduction
    ( * func )( data, /*_global_, */wsd );

    // Point the 'privates' member to the actual private value computed in the reduction code
    // FIXME copy back data cannot be made at the end because 
    // the privates member is used before this copy back is performed
    int i;
    for( i = 0; i < n_reductions; i++ )
    {
        ( * ( set_privates[i] ) )( ( * ( result[i] ) ).privates, global_th_data, i, omp_get_thread_num( ) );
    }
}
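A sketch of a combiner with the signature expected for the all_threads_reduction operators, reducing integer scalars by addition (hypothetical; the real operators are generated by the translator):

/* Hypothetical reduction combiner: accumulate 'in' into 'out'. */
static void reduce_int_sum( void * out, void * in, int num_scalars )
{
    int i;
    for( i = 0; i < num_scalars; i++ )
        ( ( int * ) out )[i] += ( ( int * ) in )[i];
}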
Example #14
static void NANOS_worksharing( int lb, int ub, int step, int chunk, char * description,
                               void ( * func ) ( void * data, nanos_ws_desc_t * wsd ), void * data, long data_size, long ( * get_data_align )( void ), 
                               void * empty_data, void ( * init_func ) ( void *, void * ), void * ws_policy, bool wait )
{
    nanos_err_t err;

    // Create the Worksharing
    bool single_guard;
    nanos_ws_desc_t * wsd;
    nanos_ws_info_loop_t ws_info_loop;
    ws_info_loop.lower_bound = lb;
    ws_info_loop.upper_bound = ub;
    ws_info_loop.loop_step = step;
    ws_info_loop.chunk_size = chunk;
    err = nanos_worksharing_create( &wsd, ws_policy, ( void ** ) &ws_info_loop, &single_guard );
    if( err != NANOS_OK )
        nanos_handle_error( err );

    if( single_guard )
    {
        int sup_threads;
        err = nanos_team_get_num_supporting_threads( &sup_threads );
        if( err != NANOS_OK )
            nanos_handle_error( err );
        if( sup_threads > 0 )
        {
            // Configure the Worksharing
            err = nanos_malloc( ( void ** ) &( *wsd ).threads, sizeof( void * ) * sup_threads, /*filename*/"", /*fileline*/0 );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            err = nanos_team_get_supporting_threads( &( *wsd ).nths, ( *wsd ).threads );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            
            // Create the WD and its properties
            void * wd = ( void * ) 0;
            nanos_wd_dyn_props_t props;
            props.tie_to = ( void * ) 0;
            props.priority = 0;
            props.flags.is_final = 0;
            
            // Compute copy data (For SMP devices there are no copies. Just CUDA device requires copy data)
            int num_copies = 0;
            // Compute dependencies (ROSE is not currently supporting dependencies among the tasks)
            int num_data_accesses = 0;
            // TODO Compute dimensions
            int num_dimensions = 0;
            // Compute device descriptor (at the moment, only SMP is supported)
            int num_devices = 1;
    
            // Create the slicer
            nanos_smp_args_t _smp_args = { func };
            struct nanos_const_wd_definition nanos_wd_const_data = { 
                { { 1,          // mandatory creation
                    1,          // tied
                    0, 0, 0, 0, 0, 0 },                         // properties 
                    ( *get_data_align )( ),                     // data alignment
                    num_copies, num_devices, num_dimensions,
                    description                                 // description
                }, 
                { { &nanos_smp_factory,                         // device description
                    &_smp_args }                                // outlined function
                } 
            };
            void * slicer = nanos_find_slicer( "replicate" );
            if( slicer == (void *)0 )
                nanos_handle_error( NANOS_UNIMPLEMENTED );
            
            struct sections_data_t* empty_data = ( struct sections_data_t * ) 0;
            err = nanos_create_sliced_wd( &wd, nanos_wd_const_data.base.num_devices, nanos_wd_const_data.devices, 
                                          data_size, nanos_wd_const_data.base.data_alignment, ( void ** ) &empty_data, 
                                          ( void ** ) 0, slicer, &nanos_wd_const_data.base.props, &props, 
                                          num_copies, ( nanos_copy_data_t ** ) 0, 
                                          num_dimensions, ( nanos_region_dimension_internal_t ** ) 0 );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            
            // Initialize outlined data
            ( *init_func )( empty_data, data );
            
            // Submit the work to the runtime system
            err = nanos_submit( wd, num_data_accesses, ( nanos_data_access_t * ) 0, ( nanos_team_t ) 0 );
            if( err != NANOS_OK )
                nanos_handle_error( err );
            
            err = nanos_free( ( * wsd ).threads );
            if( err != NANOS_OK )
                nanos_handle_error( err );
        }
    }
    
    ( * func )( data, wsd );
    
    // Wait in case it is necessary
    if( wait )
    {
        err = nanos_omp_barrier( );
        if( err != NANOS_OK )
            nanos_handle_error( err );
    }
}
Example #15
void NANOS_task( void ( * func ) ( void * ), void *data, 
                 long data_size, long ( * get_data_align ) ( void ), 
                 void * empty_data, void ( * init_func ) ( void *, void * ),
                 bool if_clause, unsigned untied,
                 int num_deps, int * deps_dir, void ** deps_data, 
                 int * deps_n_dims, nanos_region_dimension_t ** deps_dims, 
                 long int * deps_offset )
{
    nanos_err_t err;
    
    bool nanos_is_in_final;
    err = nanos_in_final( &nanos_is_in_final );
    if( err != NANOS_OK )
        nanos_handle_error( err );
    if( nanos_is_in_final )
    {
        ( *func )( data );
    }
    else
    {
        // Compute copy data (For SMP devices there are no copies. Just CUDA device requires copy data)
        int num_copies = 0;
        // TODO Compute dimensions (for devices other than SMP)
        int num_dimensions = 0;
        // Compute device descriptor (at the moment, only SMP is supported)
        int num_devices = 1;
        // Compute dependencies
        const unsigned int num_data_accesses = num_deps;
        nanos_data_access_t dependences[num_data_accesses];
        int i;
        for( i = 0; i < num_data_accesses; ++i )
        {
            int in = ( deps_dir[i] & ( e_dep_dir_in | e_dep_dir_inout ) );
            int out = ( deps_dir[i] & ( e_dep_dir_out | e_dep_dir_inout ) );
            nanos_access_type_internal_t flags = {
                ( in != 0 ), // input
                ( out != 0 ), // output
                0 , // can rename
                0 , // concurrent
                0 , // commutative
            };
            nanos_data_access_t dep = { deps_data[i], flags, deps_n_dims[i], deps_dims[i], deps_offset[i] };
            dependences[i] = dep;
        }
        
        // Create the Device descriptor (at the moment, only SMP is supported)
        nanos_smp_args_t _smp_args = { func };
        char * task_name; 
        asprintf( &task_name, "task_%d", task_id++ );
        struct nanos_const_wd_definition nanos_wd_const_data = {
            { { 0,          // mandatory creation
                !untied,    // tied 
                0, 0, 0, 0, 0, 0 },                     // properties 
            ( *get_data_align )( ),                     // data alignment
            num_copies, num_devices, num_dimensions,
            task_name                                 // description
            }, 
            { { &nanos_smp_factory,                     // device description
                &_smp_args }                            // outlined function
            }
        };
        
        // Compute properties of the WD: mandatory creation, priority, tiedness, real-time info and copy declarations
        nanos_wd_dyn_props_t dyn_props;
        dyn_props.tie_to = 0;
        dyn_props.priority = 0;
        dyn_props.flags.is_final = 0;
    
        // Create the WD
        nanos_wd_t wd = (nanos_wd_t) 0;
        err = nanos_create_wd_compact( &wd, &nanos_wd_const_data.base, &dyn_props, 
                                    data_size, ( void ** ) &empty_data,
                                    nanos_current_wd( ), ( nanos_copy_data_t ** ) 0, 
                                    ( nanos_region_dimension_internal_t ** ) 0 );
        if( err != NANOS_OK ) 
            nanos_handle_error( err );
        
        if( wd != ( void * ) 0 )
        {   // Submit the task to the existing actual working group
            // Initialize outlined data
            ( *init_func )( empty_data, data );
    
            err = nanos_submit( wd, num_data_accesses, dependences, ( void * ) 0 );
            if( err != NANOS_OK ) 
                nanos_handle_error( err );
        }
        else
        { // The task must be run immediately
            err = nanos_create_wd_and_run_compact( &nanos_wd_const_data.base, &dyn_props, 
                                                data_size, data, num_data_accesses,
                                                dependences, ( nanos_copy_data_t * ) 0, 
                                                ( nanos_region_dimension_internal_t * ) 0, 
                                                ( void ( * )( void *, void * ) ) 0 );
            if( err != NANOS_OK ) 
                nanos_handle_error( err );
        }
    }
}
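The direction tests in NANOS_task assume a bit-flag encoding for deps_dir. A sketch consistent with those tests (the real e_dep_dir_* definitions are not shown in the source):

/* Hypothetical dependency-direction flags; 'inout' is a separate bit so
   that both the input and the output test above fire for it. */
typedef enum
{
    e_dep_dir_in    = 1 << 0,
    e_dep_dir_out   = 1 << 1,
    e_dep_dir_inout = 1 << 2
} e_dep_dir_t;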