Example #1
int tMPI_Thread_barrier_init(tMPI_Thread_barrier_t *barrier, int n)
{
    if (barrier == NULL)
    {
        return EINVAL;
    }

    barrier->barrierp = (struct tMPI_Thread_barrier*)
        tMPI_Malloc(sizeof(struct tMPI_Thread_barrier));
    if (barrier->barrierp == NULL)
    {
        return ENOMEM;
    }

#if 0
    /* Use this once Vista is the oldest supported Windows version: */
    InitializeCriticalSection(&(barrier->barrierp->cs));
    InitializeConditionVariable(&(barrier->barrierp->cv));
#else
    tMPI_Thread_mutex_init(&(barrier->barrierp->cs));
    tMPI_Thread_cond_init(&(barrier->barrierp->cv));
#endif

    barrier->threshold = n;
    barrier->count     = n;
    barrier->cycle     = 0;

    return 0;
}
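
A minimal usage sketch, assuming the companion call tMPI_Thread_barrier_wait() (thread_mpi provides it) and a fixed number of worker threads; the helper names are illustrative, not thread_mpi API:

/* Illustrative usage: one barrier shared by nthreads workers. */
static tMPI_Thread_barrier_t barrier;

int barrier_setup(int nthreads)         /* hypothetical helper */
{
    /* 0 on success; EINVAL or ENOMEM from the initializer above */
    return tMPI_Thread_barrier_init(&barrier, nthreads);
}

void worker_step(void)                  /* hypothetical helper */
{
    /* ... per-thread work ... */
    tMPI_Thread_barrier_wait(&barrier); /* all nthreads meet here */
}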
Example #2
static int tMPI_Thread_mutex_init_once(tMPI_Thread_mutex_t *mtx)
{
    int ret = 0;

    /* This is essentially a copy of the code from the one-time
     * initialization, but with a call to the mutex init routine instead.
     * It might seem like overkill, but it only runs the first time a
     * static mutex is used, and it is important to get all the
     * memory barriers right. Trust me, you don't want a deadlock here...
     */

    /* initialize the initializers */
    tMPI_Init_initers();
    /* Lock the common one-time init mutex so we can check carefully */
    EnterCriticalSection( &mutex_init );

    /* Do the actual (locked) check - system mutex is locked if we get here */
    if (mtx->mutex == NULL)
    {
        /* No need to keep the lock during execution -
         * Only one thread can do it anyway.
         */
        ret = tMPI_Thread_mutex_init(mtx);
    }
    LeaveCriticalSection( &mutex_init );

    return ret;
}
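
The function above is the check-under-lock pattern for lazily initializing static objects. A stripped-down sketch of the same idea, with illustrative names (init_guard, lazy_init, and create are assumptions, not thread_mpi API):

#include <windows.h>

/* A global guard serializes first-time creation of any lazily
   initialized object; the NULL check is only meaningful under the lock. */
static CRITICAL_SECTION init_guard;     /* assume initialized at startup */

static int lazy_init(void **obj, int (*create)(void **))
{
    int ret = 0;
    EnterCriticalSection(&init_guard);
    if (*obj == NULL)
    {
        ret = create(obj);
    }
    LeaveCriticalSection(&init_guard);
    return ret;
}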
Example #3
void tMPI_Spinlock_init(tMPI_Spinlock_t *x)
{
    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    *x         = (tMPI_Spinlock_t)malloc(sizeof(**x)); /* the struct, not the pointer */
    (*x)->lock = (tMPI_Thread_mutex_t*)malloc(sizeof(tMPI_Thread_mutex_t));
    tMPI_Thread_mutex_init((*x)->lock);
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
}
Example #4
/* NOTE: initializes the spinlock on first use; takes the global atomic
   mutex itself */
static void tMPI_Spinlock_init_once(tMPI_Spinlock_t *x)
{
    tMPI_Thread_mutex_lock(&tMPI_Atomic_mutex);
    if (!*x)
    {
        *x         = (tMPI_Spinlock_t)malloc(sizeof(**x)); /* the struct, not the pointer */
        (*x)->lock = (tMPI_Thread_mutex_t*)malloc(sizeof(tMPI_Thread_mutex_t));
        tMPI_Thread_mutex_init((*x)->lock);
    }
    tMPI_Thread_mutex_unlock(&tMPI_Atomic_mutex);
}
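
Both spinlock routines guard allocation with the global tMPI_Atomic_mutex, so a zero-initialized static spinlock is created exactly once even when several threads hit it simultaneously. A hypothetical sketch of how a lock routine would use the init-once guard (my_spinlock_lock is an illustrative name, not thread_mpi API):

/* First contact allocates the lock; later calls find it non-NULL. */
void my_spinlock_lock(tMPI_Spinlock_t *x)
{
    tMPI_Spinlock_init_once(x);          /* no-op after the first call */
    tMPI_Thread_mutex_lock((*x)->lock);
}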
Example #5
static void tMPI_Global_init(struct tmpi_global *g, int Nthreads)
{
    g->usertypes        = NULL;
    g->N_usertypes      = 0;
    g->Nalloc_usertypes = 0;
    tMPI_Thread_mutex_init(&(g->timer_mutex));
    tMPI_Spinlock_init(&(g->datatype_lock));

    tMPI_Thread_barrier_init( &(g->barrier), Nthreads);

    tMPI_Thread_mutex_init(&(g->comm_link_lock));

#if !(defined( _WIN32 ) || defined( _WIN64 ) )
    /* the time at initialization. */
    gettimeofday( &(g->timer_init), NULL);
#else
    /* the time at initialization. */
    g->timer_init = GetTickCount();
#endif

}
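
A sketch of the matching elapsed-time helper, mirroring the two platform branches above; elapsed_since_init is an illustrative name and assumes g->timer_init has the types set in tMPI_Global_init:

#if !(defined( _WIN32 ) || defined( _WIN64 ) )
#include <sys/time.h>
#else
#include <windows.h>
#endif

/* Wall-clock seconds since tMPI_Global_init() stored the start time. */
static double elapsed_since_init(struct tmpi_global *g)
{
#if !(defined( _WIN32 ) || defined( _WIN64 ) )
    struct timeval now;
    gettimeofday(&now, NULL);
    return (now.tv_sec  - g->timer_init.tv_sec) +
           (now.tv_usec - g->timer_init.tv_usec)*1e-6;
#else
    return (GetTickCount() - g->timer_init)*1e-3; /* ticks are in ms */
#endif
}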
Example #6
int tMPI_Comm_alloc(tMPI_Comm *newcomm, tMPI_Comm parent, int N)
{
    struct tmpi_comm_ *retc;
    int                i;
    int                ret;

    retc = (struct tmpi_comm_*)tMPI_Malloc(sizeof(struct tmpi_comm_));
    if (retc == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }

    /* Nthreads is the library-wide thread count (a global), as opposed
       to N, this communicator's size */
    retc->grp.peers = (struct tmpi_thread**)tMPI_Malloc(
                sizeof(struct tmpi_thread*)*Nthreads);
    if (retc->grp.peers == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }
    retc->grp.N = N;

    ret = tMPI_Thread_mutex_init( &(retc->comm_create_lock) );
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }
    ret = tMPI_Thread_cond_init( &(retc->comm_create_prep) );
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }
    ret = tMPI_Thread_cond_init( &(retc->comm_create_finish) );
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }

    retc->split    = NULL;
    retc->new_comm = NULL;
    /* we have no topology to start out with */
    retc->cart = NULL;
    /*retc->graph=NULL;*/

    /* we start counting at 0 */
    tMPI_Atomic_set( &(retc->destroy_counter), 0);

    /* initialize the main barrier */
    tMPI_Barrier_init(&(retc->barrier), N);

    /* the reduce barriers */
    {
        /* First calculate the number of reduce barriers */
        int Niter = 0; /* the iteration number */
        int Nred  = N; /* the number of reduce barriers for this iteration */
        while (Nred > 1)
        {
            /* Nred becomes Nred/2 plus a remainder, because a solitary
               process at the end of the list must still be accounted for */
            Nred   = Nred/2 + Nred%2;
            Niter += 1;
        }
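        /* Worked example: N = 5 gives Nred = 3, then 2, then 1, so
           Niter = 3 = ceil(log2(5)); each pass pairs processes off and
           carries the odd one over to the next round. */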

        retc->N_reduce_iter = Niter;
        /* allocate the list */
        retc->reduce_barrier = (tMPI_Barrier_t**)
            tMPI_Malloc(sizeof(tMPI_Barrier_t*)*(Niter+1));
        if (retc->reduce_barrier == NULL)
        {
            return TMPI_ERR_NO_MEM;
        }
        retc->N_reduce = (int*)tMPI_Malloc(sizeof(int)*(Niter+1));
        if (retc->N_reduce == NULL)
        {
            return TMPI_ERR_NO_MEM;
        }

        /* we reset Nred to N */
        Nred = N;
        for (i = 0; i < Niter; i++)
        {
            int j;

            Nred              = Nred/2 + Nred%2;
            retc->N_reduce[i] = Nred;
            /* allocate the sub-list */
            retc->reduce_barrier[i] = (tMPI_Barrier_t*)
                tMPI_Malloc(sizeof(tMPI_Barrier_t)*(Nred));
            if (retc->reduce_barrier[i] == NULL)
            {
                return TMPI_ERR_NO_MEM;
            }
            for (j = 0; j < Nred; j++)
            {
                tMPI_Barrier_init(&(retc->reduce_barrier[i][j]), 2);
            }
        }
    }

    /* the reduce buffers */
    retc->reduce_sendbuf = (tMPI_Atomic_ptr_t*)
        tMPI_Malloc(sizeof(tMPI_Atomic_ptr_t)*Nthreads);
    if (retc->reduce_sendbuf == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }
    retc->reduce_recvbuf = (tMPI_Atomic_ptr_t*)
        tMPI_Malloc(sizeof(tMPI_Atomic_ptr_t)*Nthreads);
    if (retc->reduce_recvbuf == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }

    if (parent)
    {
        retc->erh = parent->erh;
    }
    else
    {
        retc->erh = TMPI_ERRORS_ARE_FATAL;
    }

    /* coll_env objects */
    retc->cev = (struct coll_env*)tMPI_Malloc(sizeof(struct coll_env)*
                                              N_COLL_ENV);
    if (retc->cev == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }

    for (i = 0; i < N_COLL_ENV; i++)
    {
        ret = tMPI_Coll_env_init( &(retc->cev[i]), N);
        if (ret != TMPI_SUCCESS)
        {
            return ret;
        }
    }
    /* multi_sync objects */
    retc->csync = (struct coll_sync*)tMPI_Malloc(sizeof(struct coll_sync)*N);
    if (retc->csync == NULL)
    {
        return TMPI_ERR_NO_MEM;
    }

    for (i = 0; i < N; i++)
    {
        ret = tMPI_Coll_sync_init( &(retc->csync[i]), N);
        if (ret != TMPI_SUCCESS)
        {
            return ret;
        }
    }

    ret = tMPI_Thread_mutex_lock( &(tmpi_global->comm_link_lock) );
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }
    /* we insert ourselves in the circular list, after TMPI_COMM_WORLD */
    if (TMPI_COMM_WORLD)
    {
        retc->next = TMPI_COMM_WORLD;
        retc->prev = TMPI_COMM_WORLD->prev;

        TMPI_COMM_WORLD->prev->next = retc;
        TMPI_COMM_WORLD->prev       = retc;
    }
    else
    {
        retc->prev = retc->next = retc;
    }
    ret = tMPI_Thread_mutex_unlock( &(tmpi_global->comm_link_lock) );
    if (ret != 0)
    {
        return tMPI_Error(TMPI_COMM_WORLD, TMPI_ERR_IO);
    }
    *newcomm = retc;
    return TMPI_SUCCESS;
}
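
The list manipulation near the end is the standard splice-before-head pattern for a circular doubly-linked list. A self-contained sketch with a generic node type (struct node and list_insert_before are illustrative, not thread_mpi API):

#include <stddef.h>

struct node
{
    struct node *prev;
    struct node *next;
};

/* Insert n just before head, i.e. at the tail of the circular list,
   mirroring the TMPI_COMM_WORLD splice above. */
static void list_insert_before(struct node *head, struct node *n)
{
    if (head != NULL)
    {
        n->next          = head;
        n->prev          = head->prev;
        head->prev->next = n;
        head->prev       = n;
    }
    else
    {
        n->prev = n->next = n;   /* first element points at itself */
    }
}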