Code example #1
File: oshmpi-mcs-lock.c Project: jeffhammond/oshmpi
int oshmpi_trylock(long * lockp)
{
  int is_locked = -1, nil = -1;
  oshmpi_lock_t *lock = (oshmpi_lock_t *) lockp;
  lock->prev = -1;
  /* Fetch the current tail; if it is -1, atomically replace it with my rank */
  MPI_Compare_and_swap (&shmem_world_rank, &nil, &(lock->prev), MPI_INT,
			TAIL, TAIL_DISP, oshmpi_lock_win);
  MPI_Win_flush (TAIL, oshmpi_lock_win);
  /* Check whether the previous tail process is holding the lock */
  if (lock->prev != -1)
    {
      MPI_Fetch_and_op (NULL, &is_locked, MPI_INT, lock->prev,
			LOCK_DISP, MPI_NO_OP, oshmpi_lock_win);
      MPI_Win_flush (lock->prev, oshmpi_lock_win);

      if (is_locked)
	return 0;
    }
  /* Install myself as the new tail */
  MPI_Fetch_and_op (&shmem_world_rank, &(lock->prev), MPI_INT, TAIL,
		    TAIL_DISP, MPI_REPLACE, oshmpi_lock_win);
  MPI_Win_flush (TAIL, oshmpi_lock_win);
  /* Hold lock */
  oshmpi_lock_base[LOCK_DISP] = 1;
  MPI_Win_sync (oshmpi_lock_win);

  return 1;
}
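
For reference alongside the project code on this page, here is a minimal, self-contained sketch (not taken from any of these projects) of the MPI_Compare_and_swap call itself: every rank tries to swap its rank into a single integer on rank 0 that starts at -1, and exactly one rank observes the old value -1 and wins.

/* Minimal sketch, not project code: one CAS race to a single integer. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, *base, origin, compare = -1, result;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* One MPI_INT window element per process; only rank 0's element is used. */
    MPI_Win_allocate(sizeof(int), sizeof(int), MPI_INFO_NULL,
                     MPI_COMM_WORLD, &base, &win);

    MPI_Win_lock_all(0, win);

    /* Initialize the local window element and make the store visible. */
    base[0] = -1;
    MPI_Win_sync(win);
    MPI_Barrier(MPI_COMM_WORLD);

    /* Atomically: if the target equals 'compare' (-1), replace it with my
     * rank; in every case the previous target value is returned in 'result'. */
    origin = rank;
    MPI_Compare_and_swap(&origin, &compare, &result, MPI_INT,
                         0 /* target rank */, 0 /* target disp */, win);
    MPI_Win_flush(0, win);

    /* Only the rank that saw the initial -1 actually performed the swap. */
    if (result == compare)
        printf("rank %d won the swap\n", rank);

    MPI_Win_unlock_all(win);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}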
Code example #2
File: mcs-lock.c Project: coti/oshmpi
/** Unlock a mutex.
 *
 * @param[in] hdl   Handle to the mutex
 * @return          MPI status
 */
int MCS_Mutex_unlock(MCS_Mutex hdl)
{
	int next;

	/* Read my next pointer.  FOP is used since another process may write to
	 * this location concurrent with this read. */
	MPI_Fetch_and_op(NULL, &next, MPI_INT, shmem_world_rank, 
			MCS_MTX_ELEM_DISP, MPI_NO_OP, hdl->window);
	MPI_Win_flush(shmem_world_rank, hdl->window);

	if (next == -1) {
		int tail;
		int nil = -1;

		/* Check if we are at the tail of the lock queue.  If so, we're
		 * done.  If not, we need to send notification. */
		MPI_Compare_and_swap(&nil, &shmem_world_rank, &tail, MPI_INT, hdl->tail_rank,
				MCS_MTX_TAIL_DISP, hdl->window);
		MPI_Win_flush(hdl->tail_rank, hdl->window);

		if (tail != shmem_world_rank) {
			debug_print("%2d: UNLOCK - waiting for next pointer (tail = %d)\n", shmem_world_rank, tail);
			assert(tail >= 0 && tail < shmem_world_size);

			for (;;) {
				int flag;

				MPI_Fetch_and_op(NULL, &next, MPI_INT, shmem_world_rank, MCS_MTX_ELEM_DISP,
						MPI_NO_OP, hdl->window);

				MPI_Win_flush(shmem_world_rank, hdl->window);
				if (next != -1) break;

				/* Poke the MPI progress engine so the remote update of our next pointer can arrive. */
				MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE);
			}
		}
	}

	/* Notify the next waiting process */
	if (next != -1) {
		debug_print("%2d: UNLOCK - notifying %d\n", shmem_world_rank, next);
		MPI_Send(NULL, 0, MPI_BYTE, next, MCS_MUTEX_TAG, hdl->comm);
	}

	debug_print("%2d: UNLOCK - lock released\n", shmem_world_rank);

	return MPI_SUCCESS;
}
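
The unlock above pairs with a blocking acquire that installs the caller at the tail of the MCS queue. That routine is not shown on this page; the following sketch is modeled on the MCS mutex example in the MPICH test suite (from which this code appears to derive), reusing the hdl layout, the MCS_MTX_ELEM_DISP/MCS_MTX_TAIL_DISP displacements, and the MCS_MUTEX_TAG seen above.

/* Sketch of the matching acquire path, modeled on the MPICH MCS mutex example. */
int MCS_Mutex_lock(MCS_Mutex hdl)
{
	int prev;

	/* This store is safe, since it cannot happen concurrently with a remote
	 * write */
	hdl->base[MCS_MTX_ELEM_DISP] = -1;
	MPI_Win_sync(hdl->window);

	/* Atomically swap myself in as the new tail; 'prev' is the old tail. */
	MPI_Fetch_and_op(&shmem_world_rank, &prev, MPI_INT, hdl->tail_rank,
			MCS_MTX_TAIL_DISP, MPI_REPLACE, hdl->window);
	MPI_Win_flush(hdl->tail_rank, hdl->window);

	/* If there was a previous tail, link myself behind it and wait for its
	 * notification; otherwise the mutex is already mine. */
	if (prev != -1) {
		MPI_Status status;

		MPI_Accumulate(&shmem_world_rank, 1, MPI_INT, prev,
				MCS_MTX_ELEM_DISP, 1, MPI_INT, MPI_REPLACE, hdl->window);
		MPI_Win_flush(prev, hdl->window);

		MPI_Recv(NULL, 0, MPI_BYTE, prev, MCS_MUTEX_TAG, hdl->comm, &status);
	}

	return MPI_SUCCESS;
}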
Code example #3
File: mcs-lock.c Project: coti/oshmpi
/** Attempt to acquire a mutex.
 *
 * @param[in] hdl   Handle to the mutex
 * @param[out] success Indicates whether the mutex was acquired (0 on success, following the shmem_test_lock convention)
 * @return          MPI status
 */
int MCS_Mutex_trylock(MCS_Mutex hdl, int *success)
{
	int tail, nil = -1;

	/* This store is safe, since it cannot happen concurrently with a remote
	 * write */
	hdl->base[MCS_MTX_ELEM_DISP] = -1;
	MPI_Win_sync(hdl->window);

	/* Check if the lock is available and claim it if it is. */
	MPI_Compare_and_swap(&shmem_world_rank, &nil, &tail, MPI_INT, hdl->tail_rank,
			MCS_MTX_TAIL_DISP, hdl->window);
	MPI_Win_flush(hdl->tail_rank, hdl->window);

	/* If the old tail was -1, we have claimed the mutex.  Per the
	 * shmem_test_lock convention, 0 indicates success. */
	*success = (tail == nil) ? 0 : 1;

	debug_print("%2d: TRYLOCK - %s\n", shmem_world_rank, (*success) ? "Non-success" : "Success");

	return MPI_SUCCESS;
}
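
Since success follows the shmem_test_lock convention (0 means acquired), a caller that wants a spinning lock would loop while the flag is nonzero. A hypothetical usage sketch, assuming hdl is an already-constructed MCS_Mutex:

/* Hypothetical caller, not project code: spin until the mutex is ours. */
int success;
do {
	MCS_Mutex_trylock(hdl, &success);
} while (success != 0);
/* ... critical section ... */
MCS_Mutex_unlock(hdl);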
Code example #4
dart_ret_t dart_lock_try_acquire (dart_lock_t lock, int32_t *is_acquired)
{
	dart_unit_t unitid;
	dart_team_myid (lock -> teamid, &unitid);
	if (lock -> is_acquired == 1)
	{
		printf ("Warning: TRYLOCK	- %2d has acquired the lock already\n", unitid);
		return DART_OK;
	}
	dart_gptr_t gptr_tail;

	int32_t result[1];
	int32_t compare[1] = {-1};

	DART_GPTR_COPY(gptr_tail, lock -> gptr_tail);
	dart_unit_t tail = gptr_tail.unitid;
	uint64_t offset = gptr_tail.addr_or_offs.offset;

	/* Atomicity: Check if the lock is available and claim it if it is. */
	MPI_Compare_and_swap (&unitid, compare, result, MPI_INT32_T, tail, offset, dart_win_local_alloc);
	MPI_Win_flush (tail, dart_win_local_alloc);

	/* If the old tail value was -1, we have claimed the lock; otherwise, do nothing. */
	if (*result == -1)
	{
		lock -> is_acquired = 1;
		*is_acquired = 1;
	}
	else
	{
		*is_acquired = 0;
	}
	DART_LOG_DEBUG("dart_lock_try_acquire: trylock %s in team %d",
                 ((*is_acquired) ? "succeeded" : "failed"),
                 (lock -> teamid));
	return DART_OK;
}
Code example #5
File: linked_list_fop.c Project: R7R8/simgrid
int main(int argc, char **argv) {
    int           procid, nproc, i;
    MPI_Win       llist_win;
    llist_ptr_t   head_ptr, tail_ptr;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &procid);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &llist_win);

    /* Process 0 creates the head node */
    if (procid == 0)
        head_ptr.disp = alloc_elem(-1, llist_win);

    /* Broadcast the head pointer to everyone */
    head_ptr.rank = 0;
    MPI_Bcast(&head_ptr.disp, 1, MPI_AINT, 0, MPI_COMM_WORLD);
    tail_ptr = head_ptr;

    /* All processes concurrently append NUM_ELEMS elements to the list */
    for (i = 0; i < NUM_ELEMS; i++) {
        llist_ptr_t new_elem_ptr;
        int success;

        /* Create a new list element and register it with the window */
        new_elem_ptr.rank = procid;
        new_elem_ptr.disp = alloc_elem(procid, llist_win);

        /* Append the new node to the list.  This might take multiple attempts if
           others have already appended and our tail pointer is stale. */
        do {
            llist_ptr_t next_tail_ptr = nil;

            MPI_Win_lock(MPI_LOCK_SHARED, tail_ptr.rank, MPI_MODE_NOCHECK, llist_win);

            MPI_Compare_and_swap((void*) &new_elem_ptr.rank, (void*) &nil.rank,
                                  (void*) &next_tail_ptr.rank, MPI_INT, tail_ptr.rank,
                                  (MPI_Aint) &(((llist_elem_t*)tail_ptr.disp)->next.rank), llist_win);

            MPI_Win_unlock(tail_ptr.rank, llist_win);
            success = (next_tail_ptr.rank == nil.rank);

            if (success) {
                int i, flag;
                MPI_Aint result;

                MPI_Win_lock(MPI_LOCK_SHARED, tail_ptr.rank, MPI_MODE_NOCHECK, llist_win);

                MPI_Fetch_and_op(&new_elem_ptr.disp, &result, MPI_AINT, tail_ptr.rank,
                                  (MPI_Aint) &(((llist_elem_t*)tail_ptr.disp)->next.disp),
                                  MPI_REPLACE, llist_win);

                /* Note: accumulate is faster, since we don't need the result.  Replacing with
                   Fetch_and_op to create a more complete test case. */
                /*
                MPI_Accumulate(&new_elem_ptr.disp, 1, MPI_AINT, tail_ptr.rank,
                               (MPI_Aint) &(((llist_elem_t*)tail_ptr.disp)->next.disp), 1,
                               MPI_AINT, MPI_REPLACE, llist_win);
                */

                MPI_Win_unlock(tail_ptr.rank, llist_win);
                tail_ptr = new_elem_ptr;

                /* For implementations that use pt-to-pt messaging, force progress for other threads'
                   RMA operations. */
                for (i = 0; i < NPROBE; i++)
                    MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE);

            } else {
                /* Tail pointer is stale, fetch the displacement.  May take multiple tries
                   if it is being updated. */
                do {
                    MPI_Aint junk = 0;

                    MPI_Win_lock(MPI_LOCK_SHARED, tail_ptr.rank, MPI_MODE_NOCHECK, llist_win);

                    MPI_Fetch_and_op(NULL, &next_tail_ptr.disp, MPI_AINT, tail_ptr.rank,
                                      (MPI_Aint) &(((llist_elem_t*)tail_ptr.disp)->next.disp),
                                      MPI_NO_OP, llist_win);

                    MPI_Win_unlock(tail_ptr.rank, llist_win);
                } while (next_tail_ptr.disp == nil.disp);
                tail_ptr = next_tail_ptr;
            }
        } while (!success);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Traverse the list and verify that all processes inserted exactly the correct
       number of elements. */
    if (procid == 0) {
        int  have_root = 0;
        int  errors    = 0;
        int *counts, count = 0;

        counts = (int*) malloc(sizeof(int) * nproc);
        assert(counts != NULL);

        for (i = 0; i < nproc; i++)
            counts[i] = 0;

        tail_ptr = head_ptr;

        /* Walk the list and tally up the number of elements inserted by each rank */
        while (tail_ptr.disp != nil.disp) {
            llist_elem_t elem;

            MPI_Win_lock(MPI_LOCK_SHARED, tail_ptr.rank, MPI_MODE_NOCHECK, llist_win);

            MPI_Get(&elem, sizeof(llist_elem_t), MPI_BYTE,
                    tail_ptr.rank, tail_ptr.disp, sizeof(llist_elem_t), MPI_BYTE, llist_win);

            MPI_Win_unlock(tail_ptr.rank, llist_win);

            tail_ptr = elem.next;

            /* This is not the root */
            if (have_root) {
                assert(elem.value >= 0 && elem.value < nproc);
                counts[elem.value]++;
                count++;

                if (verbose) {
                    int last_elem = tail_ptr.disp == nil.disp;
                    printf("%2d%s", elem.value, last_elem ? "" : " -> ");
                    if (count % ELEM_PER_ROW == 0 && !last_elem)
                        printf("\n");
                }
            }

            /* This is the root */
            else {
                assert(elem.value == -1);
                have_root = 1;
            }
        }

        if (verbose)
          printf("\n\n");

        /* Verify the counts we collected */
        for (i = 0; i < nproc; i++) {
            int expected = NUM_ELEMS;

            if (counts[i] != expected) {
                printf("Error: Rank %d inserted %d elements, expected %d\n", i, counts[i], expected);
                errors++;
            }
        }

        printf("%s\n", errors == 0 ? " No Errors" : "FAIL");
        free(counts);
    }

    MPI_Win_free(&llist_win);

    /* Free all the elements in the list */
    for ( ; my_elems_count > 0; my_elems_count--)
        MPI_Free_mem(my_elems[my_elems_count-1]);

    MPI_Finalize();
    return 0;
}
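
The excerpt uses several file-scope definitions (llist_ptr_t, llist_elem_t, nil, alloc_elem, my_elems, NUM_ELEMS, NPROBE, ELEM_PER_ROW, verbose) that are not shown. The sketch below reconstructs that context along the lines of the MPICH RMA test this file derives from, assuming <stdlib.h> for realloc; the constant values are assumptions.

/* Sketch of the file-scope context assumed by the excerpt above. */
#define NUM_ELEMS    100   /* elements appended per process (assumed value) */
#define NPROBE       100   /* progress-poking probes per append (assumed)   */
#define ELEM_PER_ROW  16   /* elements printed per row in verbose mode      */

/* Linked list pointer: owning rank plus displacement in the dynamic window */
typedef struct {
    int      rank;
    MPI_Aint disp;
} llist_ptr_t;

/* Linked list element */
typedef struct {
    int         value;
    llist_ptr_t next;
} llist_elem_t;

static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
static const int verbose = 0;

/* List of locally allocated elements, kept so they can be freed at the end */
static llist_elem_t **my_elems = NULL;
static int my_elems_size  = 0;
static int my_elems_count = 0;

/* Allocate a new list element, attach it to the dynamic window, and return
 * its displacement. */
static MPI_Aint alloc_elem(int value, MPI_Win win)
{
    MPI_Aint disp;
    llist_elem_t *elem_ptr;

    MPI_Alloc_mem(sizeof(llist_elem_t), MPI_INFO_NULL, &elem_ptr);
    elem_ptr->value = value;
    elem_ptr->next  = nil;
    MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));

    /* Track the element locally so it can be freed before MPI_Finalize */
    if (my_elems_size == my_elems_count) {
        my_elems_size += 100;
        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
    }
    my_elems[my_elems_count] = elem_ptr;
    my_elems_count++;

    MPI_Get_address(elem_ptr, &disp);
    return disp;
}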
Code example #6
int main(int argc, char *argv[])
{
    int rank, size, i, j, k;
    int errors = 0;
    int origin_shm, origin_am, dest;
    int *orig_buf = NULL, *result_buf = NULL, *compare_buf = NULL,
        *target_buf = NULL, *check_buf = NULL;
    MPI_Win win;
    MPI_Status status;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 3) {
        /* run this test with three processes */
        goto exit_test;
    }

    /* this works when MPIR_PARAM_CH3_ODD_EVEN_CLIQUES is set */
    dest = 2;
    origin_shm = 0;
    origin_am = 1;

    if (rank != dest) {
        MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &orig_buf);
        MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &result_buf);
        MPI_Alloc_mem(sizeof(int), MPI_INFO_NULL, &compare_buf);
    }

    MPI_Win_allocate(sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &target_buf, &win);

    for (k = 0; k < LOOP_SIZE; k++) {

        /* init buffers */
        if (rank == origin_shm) {
            orig_buf[0] = 1;
            compare_buf[0] = 0;
            result_buf[0] = 0;
        }
        else if (rank == origin_am) {
            orig_buf[0] = 0;
            compare_buf[0] = 1;
            result_buf[0] = 0;
        }
        else {
            MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
            target_buf[0] = 0;
            MPI_Win_unlock(rank, win);
        }

        MPI_Barrier(MPI_COMM_WORLD);

        /* perform FOP */
        MPI_Win_lock_all(0, win);
        if (rank != dest) {
            MPI_Compare_and_swap(orig_buf, compare_buf, result_buf, MPI_INT, dest, 0, win);
            MPI_Win_flush(dest, win);
        }
        MPI_Win_unlock_all(win);

        MPI_Barrier(MPI_COMM_WORLD);

        /* check results */
        if (rank != dest) {
            MPI_Gather(result_buf, 1, MPI_INT, check_buf, 1, MPI_INT, dest, MPI_COMM_WORLD);
        }
        else {
            MPI_Alloc_mem(sizeof(int) * 3, MPI_INFO_NULL, &check_buf);
            MPI_Gather(target_buf, 1, MPI_INT, check_buf, 1, MPI_INT, dest, MPI_COMM_WORLD);

            if (!(check_buf[dest] == 0 && check_buf[origin_shm] == 0 && check_buf[origin_am] == 1)
                && !(check_buf[dest] == 1 && check_buf[origin_shm] == 0 &&
                     check_buf[origin_am] == 0)) {

                printf
                    ("Wrong results: target result = %d, origin_shm result = %d, origin_am result = %d\n",
                     check_buf[dest], check_buf[origin_shm], check_buf[origin_am]);

                printf
                    ("Expected results (1): target result = 1, origin_shm result = 0, origin_am result = 0\n");
                printf
                    ("Expected results (2): target result = 0, origin_shm result = 0, origin_am result = 1\n");

                errors++;
            }

            MPI_Free_mem(check_buf);
        }
    }

    MPI_Win_free(&win);

    if (rank == origin_am || rank == origin_shm) {
        MPI_Free_mem(orig_buf);
        MPI_Free_mem(result_buf);
        MPI_Free_mem(compare_buf);
    }

  exit_test:
    if (rank == dest && errors == 0)
        printf(" No Errors\n");

    MPI_Finalize();
    return 0;
}
Code example #7
File: IMB_rma_atomic.c Project: jedwards4b/IMB
void IMB_rma_compare_and_swap (struct comm_info* c_info, int size,  
                               struct iter_schedule* iterations,
                               MODES run_mode, double* time)
{
    double res_time = -1.;
    int root = c_info->pair1;
    int s_size;
    int i;
    void *comp_b, *orig_b, *res_b; 
    MPI_Datatype data_type = MPI_INT;
    ierr = 0;
          
    if (c_info->rank < 0)
    {
        *time = res_time;
        return;
    }    
    
    MPI_Type_size(data_type,&s_size);
    for(i=0; i<N_BARR; i++) MPI_Barrier(c_info->communicator);
    

    if (c_info->rank == c_info->pair0)
    {
        /* use r_buffer for all buffers required by compare_and_swap, because 
         * on all ranks r_buffer is zero-initialized in IMB_set_buf function */
        orig_b = (char*)c_info->r_buffer + s_size*2;
        comp_b = (char*)c_info->r_buffer + s_size;
        res_b  = c_info->r_buffer;
 
        MPI_Win_lock(MPI_LOCK_SHARED, root, 0, c_info->WIN);
        if (run_mode->AGGREGATE)
        {
            res_time = MPI_Wtime();
            for (i = 0; i < iterations->n_sample; i++)
            {
                ierr = MPI_Compare_and_swap(
                        (char*)orig_b + i%iterations->r_cache_iter*iterations->r_offs,
                        (char*)comp_b + i%iterations->r_cache_iter*iterations->r_offs,
                        (char*)res_b  + i%iterations->r_cache_iter*iterations->r_offs,
                        data_type, root, i%iterations->r_cache_iter*iterations->r_offs, 
                        c_info->WIN );
                MPI_ERRHAND(ierr);
            }
            ierr = MPI_Win_flush(root, c_info->WIN);
            res_time = (MPI_Wtime() - res_time)/iterations->n_sample;
        }    
        else if ( !run_mode->AGGREGATE )    
        {
            res_time = MPI_Wtime();
            for (i = 0; i < iterations->n_sample; i++)
            {
                ierr = MPI_Compare_and_swap(
                        (char*)orig_b + i%iterations->s_cache_iter*iterations->s_offs,
                        (char*)comp_b + i%iterations->s_cache_iter*iterations->s_offs,
                        (char*)res_b  + i%iterations->r_cache_iter*iterations->r_offs,
                        data_type, root, i%iterations->r_cache_iter*iterations->r_offs,
                        c_info->WIN );
                MPI_ERRHAND(ierr);

                ierr = MPI_Win_flush(root, c_info->WIN);
                MPI_ERRHAND(ierr);
            }
            res_time = (MPI_Wtime() - res_time)/iterations->n_sample;
        }
        MPI_Win_unlock(root, c_info->WIN);
    }
    MPI_Barrier(c_info->communicator);

    *time = res_time; 
    return;
}
Code example #8
dart_ret_t dart_lock_release (dart_lock_t lock)
{
	dart_unit_t unitid;
	dart_team_myid (lock -> teamid, &unitid);
	if (lock -> is_acquired == 0)
	{
		printf ("Warning: RELEASE	- %2d has not yet required the lock\n", unitid);
		return DART_OK;
	}
	dart_gptr_t gptr_tail;
	dart_gptr_t gptr_list;
	MPI_Win win;
	int32_t *addr2, next, result[1];

	MPI_Aint disp_list;
	int32_t origin[1] = {-1};

	DART_GPTR_COPY(gptr_tail, lock -> gptr_tail);
	DART_GPTR_COPY(gptr_list, lock -> gptr_list);

	uint64_t offset_tail = gptr_tail.addr_or_offs.offset;
	int16_t  seg_id      = gptr_list.segid;
	dart_unit_t tail     = gptr_tail.unitid;
	uint16_t index       = gptr_list.flags;
	dart_gptr_getaddr(gptr_list, (void*)&addr2);

	win = dart_win_lists[index];

	/* Atomicity: Check if we are at the tail of this lock queue, if so, we are done.
	 * Otherwise, we still need to send notification. */
	MPI_Compare_and_swap (origin, &unitid, result, MPI_INT32_T, tail, offset_tail, dart_win_local_alloc);
	MPI_Win_flush (tail, dart_win_local_alloc);

	/* We are not at the tail of this lock queue. */
	if (*result != unitid)
	{
		DART_LOG_DEBUG ("%2d: UNLOCK	- waiting for next pointer (tail = %d) in team %d",
				unitid, *result, (lock -> teamid));

		if (dart_adapt_transtable_get_disp (seg_id, unitid, &disp_list) == -1)
		{
			return DART_ERR_INVAL;
		}

		/* Wait until the update of my next pointer has finished. */
		while (1)
		{
			MPI_Fetch_and_op (NULL, &next, MPI_INT, unitid, disp_list, MPI_NO_OP, win);
			MPI_Win_flush (unitid, win);

			if (next != -1) break;
		}

		DART_LOG_DEBUG ("%2d: UNLOCK	- notifying %d in team %d", unitid, next, (lock -> teamid));

		/* Notifying the next unit waiting on the lock queue. */

		MPI_Send (NULL, 0, MPI_INT, next, 0, dart_teams[index]);
		*addr2 = -1;
		MPI_Win_sync (win);
	}
	lock -> is_acquired = 0;
	DART_LOG_DEBUG ("%2d: UNLOCK	- release lock in team %d", unitid, (lock -> teamid));
	return DART_OK;
}
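
For completeness, the acquire side that builds the queue walked by dart_lock_release is sketched below. It is modeled on the MCS pattern of the examples above and reuses the DART helpers already seen here (dart_win_local_alloc, dart_win_lists, dart_teams, dart_adapt_transtable_get_disp); the details are an assumption, not the verbatim project source.

/* Sketch of the matching acquire path, assuming the DART internals above. */
dart_ret_t dart_lock_acquire (dart_lock_t lock)
{
	dart_unit_t unitid;
	dart_team_myid (lock -> teamid, &unitid);
	if (lock -> is_acquired == 1)
	{
		printf ("Warning: LOCK	- %2d has acquired the lock already\n", unitid);
		return DART_OK;
	}

	dart_gptr_t gptr_tail;
	dart_gptr_t gptr_list;
	MPI_Aint disp_list;
	int32_t predecessor[1];
	MPI_Status status;

	DART_GPTR_COPY(gptr_tail, lock -> gptr_tail);
	DART_GPTR_COPY(gptr_list, lock -> gptr_list);

	uint64_t offset_tail = gptr_tail.addr_or_offs.offset;
	int16_t  seg_id      = gptr_list.segid;
	dart_unit_t tail     = gptr_tail.unitid;
	uint16_t index       = gptr_list.flags;
	MPI_Win win          = dart_win_lists[index];

	/* Atomicity: make myself the new tail and fetch my predecessor. */
	MPI_Fetch_and_op (&unitid, predecessor, MPI_INT32_T, tail, offset_tail,
			MPI_REPLACE, dart_win_local_alloc);
	MPI_Win_flush (tail, dart_win_local_alloc);

	/* If there was a predecessor, publish myself as its next pointer and
	 * block until it notifies me (see dart_lock_release above). */
	if (*predecessor != -1)
	{
		if (dart_adapt_transtable_get_disp (seg_id, *predecessor, &disp_list) == -1)
		{
			return DART_ERR_INVAL;
		}
		MPI_Put (&unitid, 1, MPI_INT32_T, *predecessor, disp_list, 1,
				MPI_INT32_T, win);
		MPI_Win_flush (*predecessor, win);

		MPI_Recv (NULL, 0, MPI_INT, *predecessor, 0, dart_teams[index], &status);
	}

	lock -> is_acquired = 1;
	return DART_OK;
}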
Code example #9
FORT_DLL_SPEC void FORT_CALL mpi_compare_and_swap_ ( void*v1, void*v2, void*v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Aint * v6, MPI_Fint *v7, MPI_Fint *ierr ){
    *ierr = MPI_Compare_and_swap( v1, v2, v3, (MPI_Datatype)(*v4), (int)*v5, *v6, (MPI_Win)*v7 );
}