Example #1
int *dijkstra_distance ( int ohd[NV][NV]  )

/******************************************************************************/
/*
  Purpose:

    DIJKSTRA_DISTANCE uses Dijkstra's minimum distance algorithm.

  Discussion:

    We essentially build a tree.  We start with only node 0 connected
    to the tree, and this is indicated by setting CONNECTED[0] = 1.

    We initialize MIND[I] to the one step distance from node 0 to node I.
    
    Now we search among the unconnected nodes for the node MV whose minimum
    distance is smallest, and connect it to the tree.  For each remaining
    unconnected node I, we check to see whether the distance from 0 to MV
    to I is less than that recorded in MIND[I], and if so, we can reduce
    the distance.

    After NV-1 steps, we have connected all the nodes to 0, and computed
    the correct minimum distances.

  Licensing:

    This code is distributed under the GNU LGPL license. 

  Modified:

    02 July 2010

  Author:

    Original C version by Norm Matloff, CS Dept, UC Davis.
    This C version by John Burkardt.

  Parameters:

    Input, int OHD[NV][NV], the distance of the direct link between
    nodes I and J.

    Output, int DIJKSTRA_DISTANCE[NV], the minimum distance from 
    node 0 to each node.
*/
{
  int *connected;
  int i;
  int i4_huge = 2147483647;
  int md;
  int *mind;
  int mv;
  int my_first;
  int my_id;
  int my_last;
  int my_md;
  int my_mv;
  int my_step;
  int nth;
/*
  Start out with only node 0 connected to the tree.
*/
  connected = ( int * ) malloc ( NV * sizeof ( int ) );

  connected[0] = 1;
  for ( i = 1; i < NV; i++ )
  {
    connected[i] = 0;
  }
/*
  Initial estimate of minimum distance is the 1-step distance.
*/
  mind = ( int * ) malloc ( NV * sizeof ( int ) );

  for ( i = 0; i < NV; i++ )
  {
    mind[i] = ohd[0][i];
  }
/*
  Begin the parallel region.
*/
  # pragma omp parallel private ( my_first, my_id, my_last, my_md, my_mv, my_step ) \
  shared ( connected, md, mind, mv, nth, ohd )
  {
    my_id = omp_get_thread_num ( );
    nth = omp_get_num_threads ( ); 
    my_first =   (   my_id       * NV ) / nth;
    my_last  =   ( ( my_id + 1 ) * NV ) / nth - 1;
/*
  The SINGLE directive means that the block is executed by only one
  thread; which thread is unspecified, and the rest wait at the implicit
  barrier at the end of the block.
*/
    # pragma omp single
    {
      printf ( "\n" );
      printf ( "  P%d: Parallel region begins with %d threads\n", my_id, nth );
      printf ( "\n" );
    }
    fprintf ( stdout, "  P%d:  First=%d  Last=%d\n", my_id, my_first, my_last );

    for ( my_step = 1; my_step < NV; my_step++ )
    {
/*
  Before we compare the results of each thread, set the shared variable 
  MD to a big value.  Only one thread needs to do this.
*/
      # pragma omp single 
      {
        md = i4_huge;
        mv = -1; 
      }
/*
  Each thread finds the nearest unconnected node in its part of the graph.
  Some threads might have no unconnected nodes left.
*/
      find_nearest ( my_first, my_last, mind, connected, &my_md, &my_mv );
/*
  In order to determine the minimum of all the MY_MD's, we must insist
  that only one thread at a time execute this block!
*/
      # pragma omp critical
      {
        if ( my_md < md )  
        {
          md = my_md;
          mv = my_mv;
        }
      }
/*
  This barrier means that ALL threads have executed the critical
  block, and therefore MD and MV have the correct value.  Only then
  can we proceed.
*/
      # pragma omp barrier
/*
  If MV is -1, then NO thread found an unconnected node, so we're done early. 
  OpenMP does not like to BREAK out of a parallel region, so we'll just have 
  to let the iteration run to the end, while we avoid doing any more updates.

  Otherwise, we connect the nearest node.
*/
      # pragma omp single 
      {
        if ( mv != -1 )
        {
          connected[mv] = 1;
          printf ( "  P%d: Connecting node %d.\n", my_id, mv );
        }
      }
/*
  Again, we don't want any thread to proceed until the value of
  CONNECTED is updated.
*/
      # pragma omp barrier
/*
  Now each thread should update its portion of the MIND vector,
  by checking to see whether the trip from 0 to MV plus the step
  from MV to a node is closer than the current record.
*/
      if ( mv != -1 )
      {
        update_mind ( my_first, my_last, mv, connected, ohd, mind );
      }
/*
  Before starting the next step of the iteration, we need all threads 
  to complete the updating, so we set a BARRIER here.
*/
      #pragma omp barrier
    }
/*
  Once all the nodes have been connected, we can exit.
*/
    # pragma omp single
    {
      printf ( "\n" );
      printf ( "  P%d: Exiting parallel region.\n", my_id );
    }
  }

  free ( connected );

  return mind;
}
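
Example #1 calls two helpers, find_nearest and update_mind, that are not part of the listing. The sketches below are inferred only from the call sites (the slice bounds my_first/my_last, the shared mind and connected arrays, the ohd matrix and the i4_huge sentinel); treat them as an assumption, not the original implementation.

/*
  Hypothetical sketches of the two helpers called above, inferred from the
  call sites only; the original implementations are not shown in this listing.
*/
void find_nearest ( int s, int e, int mind[NV], int connected[NV],
  int *d, int *v )

/*
  Scan this thread's slice S..E for the unconnected node with the smallest
  tentative distance.  Report distance "infinity" and node -1 if the slice
  has no candidate, which the caller treats as "nothing found".
*/
{
  int i;
  int i4_huge = 2147483647;

  *d = i4_huge;
  *v = -1;
  for ( i = s; i <= e; i++ )
  {
    if ( !connected[i] && mind[i] < *d )
    {
      *d = mind[i];
      *v = i;
    }
  }
  return;
}

void update_mind ( int s, int e, int mv, int connected[NV],
  int ohd[NV][NV], int mind[NV] )

/*
  For every unconnected node in this thread's slice, check whether the path
  through the newly connected node MV is shorter than the current record.
  The test against I4_HUGE guards against adding "no link" to a distance.
*/
{
  int i;
  int i4_huge = 2147483647;

  for ( i = s; i <= e; i++ )
  {
    if ( !connected[i] && ohd[mv][i] < i4_huge )
    {
      if ( mind[mv] + ohd[mv][i] < mind[i] )
      {
        mind[i] = mind[mv] + ohd[mv][i];
      }
    }
  }
  return;
}

Since each thread only ever touches indices my_first..my_last of mind, the update phase needs no critical section; only the reduction of the per-thread minima into MD and MV does.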
Example #2
int *dijkstra_distance ( int ohd[vertex_count][vertex_count]  )
{
    int *connected;
    int i;
    int md;
    int *mind;
    int mv;
    int my_first;
    int my_id;
    int my_last;
    int my_md;
    int my_mv;
    int my_step;
    int nth;
    /*
       Start out with only node 0 connected to the tree.
     */
    connected = ( int * ) malloc ( vertex_count * sizeof ( int ) );

    connected[0] = 1;
    for ( i = 1; i < vertex_count; i++ )
    {
        connected[i] = 0;
    }
    /*
       Initial estimate of minimum distance is the 1-step distance.
     */
    mind = ( int * ) malloc ( vertex_count * sizeof ( int ) );

    for ( i = 0; i < vertex_count; i++ )
    {
        mind[i] = ohd[0][i];
    }
    /*
       Begin the parallel region.
     */
    # pragma omp parallel private ( my_first, my_id, my_last, my_md, my_mv, my_step ) \
    shared ( connected, md, mind, mv, nth, ohd )
    {
        my_id = omp_get_thread_num ( );
        nth = omp_get_num_threads ( );
        my_first =   (   my_id       * vertex_count ) / nth;
        my_last  =   ( ( my_id + 1 ) * vertex_count ) / nth - 1;
        /*
           The SINGLE directive means that the block is executed by only one
           thread; which thread is unspecified, and the rest wait at the
           implicit barrier at the end of the block.
         */
        # pragma omp single
        {
            log(DEBUG, my_id, "P%d: Parallel region begins with %d threads", my_id, nth );
        }

        log(DEBUG, my_id, "P%d:  First=%d  Last=%d", my_id, my_first, my_last );

        for ( my_step = 1; my_step < vertex_count; my_step++ )
        {
            /*
               Before we compare the results of each thread, set the shared variable
               MD to a big value.  Only one thread needs to do this.
             */
            # pragma omp single
            {
                md = INT_MAX;
                mv = -1;
            }
            /*
               Each thread finds the nearest unconnected node in its part of the graph.
               Some threads might have no unconnected nodes left.
             */
            find_nearest ( my_first, my_last, mind, connected, &my_md, &my_mv, my_id );
            /*
               In order to determine the minimum of all the MY_MD's, we must insist
               that only one thread at a time execute this block!
             */
            # pragma omp critical
            {
                if ( my_md < md )
                {
                    md = my_md;
                    mv = my_mv;
                }
            }
            /*
               This barrier means that ALL threads have executed the critical
               block, and therefore MD and MV have the correct value.  Only then
               can we proceed.
             */
            # pragma omp barrier
            /*
               If MV is -1, then NO thread found an unconnected node, so we're done early.
               OpenMP does not like to BREAK out of a parallel region, so we'll just have
               to let the iteration run to the end, while we avoid doing any more updates.

               Otherwise, we connect the nearest node.
             */
            # pragma omp single
            {
                if ( mv != -1 )
                {
                    connected[mv] = 1;
                    log(DEBUG, my_id, "P%d: Connecting node %d", my_id, mv );
                    log(DEBUG, -1, "----------------------------");
                }
            }
            /*
               Again, we don't want any thread to proceed until the value of
               CONNECTED is updated.
             */
            # pragma omp barrier
            /*
               Now each thread should update its portion of the MIND vector,
               by checking to see whether the trip from 0 to MV plus the step
               from MV to a node is closer than the current record.
             */
            if ( mv != -1 )
            {
                update_mind ( my_first, my_last, mv, connected, ohd, mind, my_id );
            }
            /*
               Before starting the next step of the iteration, we need all threads
               to complete the updating, so we set a BARRIER here.
             */
            #pragma omp barrier
        }
        /*
           Once all the nodes have been connected, we can exit.
         */
        # pragma omp single
        {
            log(DEBUG, my_id, "P%d: Exiting parallel region.", my_id );
        }
    }

    free ( connected );

    return mind;
}
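
Example #2 differs from Example #1 mainly in using vertex_count, INT_MAX and a custom log helper, but neither example shows a caller. Below is a hypothetical driver, assuming vertex_count is a compile-time constant (set to 6 here purely for illustration), that INT_MAX marks "no direct link", and that find_nearest, update_mind and log are defined elsewhere.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define vertex_count 6   /* illustrative value; the real constant lives elsewhere */

int *dijkstra_distance ( int ohd[vertex_count][vertex_count] );

int main ( void )
{
    int i;
    int j;
    int *mind;
    int ohd[vertex_count][vertex_count];
    /*
       Build a small test graph: INT_MAX marks "no direct link",
       0 is the distance from a node to itself.
     */
    for ( i = 0; i < vertex_count; i++ )
    {
        for ( j = 0; j < vertex_count; j++ )
        {
            ohd[i][j] = ( i == j ) ? 0 : INT_MAX;
        }
    }
    ohd[0][1] = ohd[1][0] = 40;
    ohd[0][2] = ohd[2][0] = 15;
    ohd[1][2] = ohd[2][1] = 20;
    ohd[1][3] = ohd[3][1] = 10;
    ohd[1][4] = ohd[4][1] = 25;
    ohd[2][3] = ohd[3][2] = 100;
    ohd[1][5] = ohd[5][1] = 6;
    ohd[4][5] = ohd[5][4] = 8;
    /*
       Compute the minimum distance from node 0 to every node and print it.
     */
    mind = dijkstra_distance ( ohd );

    for ( i = 0; i < vertex_count; i++ )
    {
        printf ( "  %2d  %2d\n", i, mind[i] );
    }

    free ( mind );
    return 0;
}

Because dijkstra_distance opens its own parallel region, the driver stays serial; the thread count can be set beforehand with omp_set_num_threads or the OMP_NUM_THREADS environment variable.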
Example #3
int *dijkstra_distance ( int ohd[vertex_count][vertex_count] )
{
    int *connected;
    int i;
    int j;
    int my_min[2];
    int all_min[2];
    int *mind, *commonmind;
    /*
       tid, ntasks, my_first and my_last are used below (and by find_nearest
       and update_mind) without local declarations, so they are assumed to be
       file-scope variables filled in from MPI_Comm_rank / MPI_Comm_size.
     */

    connected = ( int * ) malloc ( vertex_count * sizeof ( int ) );

    connected[0] = 1;
    for ( i = 1; i < vertex_count; i++ )
    {
        connected[i] = 0;
    }

    mind = ( int * ) malloc ( vertex_count * sizeof ( int ) );
    commonmind = ( int * ) malloc ( vertex_count * sizeof ( int ) );

    for ( i = 0; i < vertex_count; i++ )
    {
        mind[i] = ohd[0][i];
    }

    my_first =   (   tid       * vertex_count ) / ntasks;
    my_last  =   ( ( tid + 1 ) * vertex_count ) / ntasks - 1;

    log_d(tid, "First=%d  Last=%d", my_first, my_last );

    for ( i = 1; i < vertex_count; i++ )
    {
        my_min[0] = INT_MAX;
        my_min[1] = -1;

        find_nearest ( mind, connected, my_min );

        if(MPI_Allreduce(my_min, all_min, 1, MPI_2INT, MPI_MINLOC, MPI_COMM_WORLD) != MPI_SUCCESS)
        {
            log_e(tid, "MPI_Allreduce failed!");
        }

        if ( tid == 0 )
        {
            log_i(tid, "Common minimal node %d with distance %d", all_min[1], all_min[0]);
        }

        if ( all_min[1] != -1 )
        {
            if ( tid == 0 )
            {
                log_d(tid, "Connecting node %d", all_min[1]);
            }

            connected[all_min[1]] = 1;
            update_mind ( all_min[1], connected, ohd, mind);
        }
        
        if(MPI_Allreduce(mind, commonmind, vertex_count, MPI_INT, MPI_MIN, MPI_COMM_WORLD) != MPI_SUCCESS)
        {
            log_e(tid, "MPI_Allreduce failed!");
        }

        for ( j = 0; j < vertex_count; j++ )
        {
            mind[j] = commonmind[j];
        }
    }

    free ( connected );
    free ( mind );

    return commonmind;
}
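
Example #3 assigns my_first and my_last without declaring them and reads tid and ntasks, so those four are most plausibly file-scope variables shared with the helpers. Below is a minimal MPI setup sketch under that assumption (vertex_count fixed to 6 only for illustration; find_nearest and update_mind are again assumed to be defined elsewhere).

#include <limits.h>
#include <stdlib.h>
#include <mpi.h>

#define vertex_count 6   /* illustrative value; the real constant lives elsewhere */

/* Assumed file-scope state shared with dijkstra_distance and the helpers. */
int tid;                 /* rank of this process            */
int ntasks;              /* number of MPI processes         */
int my_first;            /* first vertex owned by this rank */
int my_last;             /* last vertex owned by this rank  */

int *dijkstra_distance ( int ohd[vertex_count][vertex_count] );

int main ( int argc, char *argv[] )
{
    int i;
    int j;
    int *mind;
    int ohd[vertex_count][vertex_count];

    MPI_Init ( &argc, &argv );
    MPI_Comm_rank ( MPI_COMM_WORLD, &tid );
    MPI_Comm_size ( MPI_COMM_WORLD, &ntasks );
    /*
       Every rank builds the same distance matrix; INT_MAX marks
       "no direct link".  Fill in the real edge weights here.
     */
    for ( i = 0; i < vertex_count; i++ )
    {
        for ( j = 0; j < vertex_count; j++ )
        {
            ohd[i][j] = ( i == j ) ? 0 : INT_MAX;
        }
    }

    mind = dijkstra_distance ( ohd );

    free ( mind );
    MPI_Finalize ( );
    return 0;
}

The MPI_MINLOC reduction over MPI_2INT pairs plays the role of the OpenMP critical section: each rank contributes its {distance, node} pair, and every rank receives the global winner in all_min.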