/* Aggressively drain one target's pending RMA operations: locate the first
 * target (across all slots) with queued operations, force a local flush on
 * it, issue everything, and spin the progress engine until the operations
 * are locally complete.  Returns MPI_SUCCESS or an MPI error code. */
int MPIDI_CH3I_RMA_Cleanup_ops_aggressive(MPID_Win * win_ptr)
{
    int slot_idx, local_completed = 0, remote_completed = 0;
    int mpi_errno = MPI_SUCCESS;
    MPIDI_RMA_Target_t *tgt = NULL;
    int made_progress = 0;

    /* If we are in an aggressive cleanup, the window must be holding
     * up resources.  If it isn't, we are in the wrong window and
     * incorrectly entered this function. */
    MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER, "**rmanoop");

    /* Scan the slots for the first target that has something to issue.
     * Walking an empty (NULL-headed) list leaves tgt NULL and moves on. */
    for (slot_idx = 0; slot_idx < win_ptr->num_slots && tgt == NULL; slot_idx++) {
        MPIDI_RMA_Target_t *t = win_ptr->slots[slot_idx].target_list;
        while (t != NULL && t->pending_op_list == NULL)
            t = t->next;
        tgt = t;
    }

    /* Nothing pending anywhere -- nothing to clean up. */
    if (tgt == NULL)
        goto fn_exit;

    /* Upgrade the sync flag so at least a local flush is performed. */
    if (tgt->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH_LOCAL)
        tgt->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH_LOCAL;

    /* Issue out all operations. */
    mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, tgt->target_rank, &made_progress);
    if (mpi_errno != MPI_SUCCESS)
        MPIU_ERR_POP(mpi_errno);

    /* Wait for local completion, polling the progress engine in between. */
    while (1) {
        mpi_errno = MPIDI_CH3I_RMA_Cleanup_ops_target(win_ptr, tgt,
                                                      &local_completed, &remote_completed);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);
        if (local_completed)
            break;
        mpi_errno = wait_progress_engine();
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Aggressively drain one target's pending RMA operations: find the first
 * target with either network or user operations still queued, force a
 * local flush on it, issue everything, and wait (via the progress engine)
 * for local completion.  Returns MPI_SUCCESS or an MPI error code. */
int MPIDI_CH3I_RMA_Cleanup_ops_aggressive(MPIR_Win * win_ptr)
{
    int idx, local_completed = 0, remote_completed ATTRIBUTE((unused)) = 0;
    int mpi_errno = MPI_SUCCESS;
    MPIDI_RMA_Target_t *tgt = NULL;
    int made_progress = 0;

    /* Scan the slots for the first target that has something to issue.
     * Walking an empty (NULL-headed) list leaves tgt NULL and moves on. */
    for (idx = 0; idx < win_ptr->num_slots && tgt == NULL; idx++) {
        MPIDI_RMA_Target_t *t = win_ptr->slots[idx].target_list_head;
        while (t != NULL &&
               t->pending_net_ops_list_head == NULL && t->pending_user_ops_list_head == NULL)
            t = t->next;
        tgt = t;
    }

    /* Nothing pending anywhere -- nothing to clean up. */
    if (tgt == NULL)
        goto fn_exit;

    /* Upgrade the sync flag so at least a local flush is performed. */
    if (tgt->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH_LOCAL)
        tgt->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH_LOCAL;

    /* Issue out all operations. */
    mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, tgt->target_rank, &made_progress);
    if (mpi_errno != MPI_SUCCESS)
        MPIR_ERR_POP(mpi_errno);

    /* Wait for local completion, polling the progress engine in between.
     * MPIDI_CH3I_RMA_ops_completion updates local_completed /
     * remote_completed in place. */
    while (1) {
        MPIDI_CH3I_RMA_ops_completion(win_ptr, tgt, local_completed, remote_completed);
        if (local_completed)
            break;
        mpi_errno = wait_progress_engine();
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Aggressively free a target element for reuse: pick a target with queued
 * operations, force a full (remote) FLUSH on it, wait for remote completion,
 * dequeue and free it, and retry until MPIDI_CH3I_Win_target_alloc succeeds.
 * On success (*target) points at a freshly allocated target element.
 * Returns MPI_SUCCESS or an MPI error code. */
int MPIDI_CH3I_RMA_Cleanup_target_aggressive(MPIR_Win * win_ptr, MPIDI_RMA_Target_t ** target)
{
    int i, local_completed ATTRIBUTE((unused)) = 0, remote_completed = 0;
    int made_progress = 0;
    MPIDI_RMA_Target_t *curr_target = NULL;
    int mpi_errno = MPI_SUCCESS;

    (*target) = NULL;

    if (win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_CALLED) {
        /* switch to window-wide protocol: issue a shared lock to every
         * off-node process that does not already have a target element */
        MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;

        MPIDI_Comm_get_vc(win_ptr->comm_ptr, win_ptr->comm_ptr->rank, &orig_vc);
        for (i = 0; i < win_ptr->comm_ptr->local_size; i++) {
            if (i == win_ptr->comm_ptr->rank)
                continue;
            MPIDI_Comm_get_vc(win_ptr->comm_ptr, i, &target_vc);
            if (orig_vc->node_id != target_vc->node_id) {
                mpi_errno = MPIDI_CH3I_Win_find_target(win_ptr, i, &curr_target);
                if (mpi_errno)
                    MPIR_ERR_POP(mpi_errno);
                if (curr_target == NULL) {
                    win_ptr->outstanding_locks++;
                    mpi_errno = send_lock_msg(i, MPI_LOCK_SHARED, win_ptr);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIR_ERR_POP(mpi_errno);
                }
            }
        }
        win_ptr->states.access_state = MPIDI_RMA_LOCK_ALL_ISSUED;
    }

    do {
        /* find a non-empty slot and set the FLUSH flag on the first
         * target */
        /* TODO: we should think about better strategies on selecting the target */
        /* NOTE(review): this assumes at least one slot is non-empty; if every
         * slot's target_list_head were NULL, i would equal num_slots and
         * slots[i] would be read out of bounds.  The MPID_Win variant of this
         * function guards with non_empty_slots != 0 -- confirm callers of
         * this variant guarantee the same invariant. */
        for (i = 0; i < win_ptr->num_slots; i++)
            if (win_ptr->slots[i].target_list_head != NULL)
                break;
        curr_target = win_ptr->slots[i].target_list_head;

        if (curr_target->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH) {
            curr_target->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH;
        }

        /* Issue out all operations. */
        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, curr_target->target_rank,
                                                        &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* Wait for remote completion; MPIDI_CH3I_RMA_ops_completion updates
         * local_completed / remote_completed in place. */
        do {
            MPIDI_CH3I_RMA_ops_completion(win_ptr, curr_target, local_completed, remote_completed);
            if (!remote_completed) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIR_ERR_POP(mpi_errno);
            }
        } while (!remote_completed);

        /* Cleanup the target. */
        mpi_errno = MPIDI_CH3I_Win_target_dequeue_and_free(win_ptr, curr_target);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* check if we got a target */
        (*target) = MPIDI_CH3I_Win_target_alloc(win_ptr);

    } while ((*target) == NULL);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Free an RMA window.  Waits for all outstanding passive-target activity on
 * this process to drain, barriers the window's communicator, releases
 * lower-layer hooks and CH3-level pools, and frees the window object.
 * Erroneous if called inside an open access/exposure epoch. */
int MPID_Win_free(MPIR_Win ** win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int in_use;
    MPIR_Comm *comm_ptr;
    MPIR_Errflag_t errflag = MPIR_ERR_NONE;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPID_WIN_FREE);

    MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPID_WIN_FREE);

    /* Only the fence-related access states (or no epoch at all) permit a
     * free; any other open epoch is a synchronization error. */
    MPIR_ERR_CHKANDJUMP(((*win_ptr)->states.access_state != MPIDI_RMA_NONE &&
                         (*win_ptr)->states.access_state != MPIDI_RMA_FENCE_ISSUED &&
                         (*win_ptr)->states.access_state != MPIDI_RMA_FENCE_GRANTED) ||
                        ((*win_ptr)->states.exposure_state != MPIDI_RMA_NONE),
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    /* 1. Here we must wait until all passive locks are released on this target,
     * because for some UNLOCK messages, we do not send ACK back to origin,
     * we must wait until lock is released so that we can free window.
     * 2. We also need to wait until AT completion counter being zero, because
     * this counter is increment everytime we meet a GET-like operation, it is
     * possible that when target entering Win_free, passive epoch is not finished
     * yet and there are still GETs doing on this target.
     * 3. We also need to wait until lock queue becomes empty. It is possible
     * that some lock requests is still waiting in the queue when target is
     * entering Win_free. */
    while ((*win_ptr)->current_lock_type != MPID_LOCK_NONE ||
           (*win_ptr)->at_completion_counter != 0 ||
           (*win_ptr)->target_lock_queue_head != NULL ||
           (*win_ptr)->current_target_lock_data_bytes != 0 || (*win_ptr)->sync_request_cnt != 0) {
        mpi_errno = wait_progress_engine();
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);
    }

    /* All processes must arrive before any resources are torn down, since a
     * peer may still reference this window until it, too, reaches the
     * barrier. */
    mpi_errno = MPID_Barrier((*win_ptr)->comm_ptr, &errflag);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Free window resources in lower layer. */
    if (MPIDI_CH3U_Win_hooks.win_free != NULL) {
        mpi_errno = MPIDI_CH3U_Win_hooks.win_free(win_ptr);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);
    }

    /* dequeue window from the global list */
    MPIR_Assert((*win_ptr)->active == FALSE);
    DL_DELETE(MPIDI_RMA_Win_inactive_list_head, (*win_ptr));

    if (MPIDI_RMA_Win_inactive_list_head == NULL && MPIDI_RMA_Win_active_list_head == NULL) {
        /* this is the last window, de-register RMA progress hook */
        mpi_errno = MPID_Progress_deregister_hook(MPIDI_CH3I_RMA_Progress_hook_id);
        if (mpi_errno != MPI_SUCCESS) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    comm_ptr = (*win_ptr)->comm_ptr;
    mpi_errno = MPIR_Comm_free_impl(comm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    /* Release CH3-level tables and pools owned by this window. */
    if ((*win_ptr)->basic_info_table != NULL)
        MPL_free((*win_ptr)->basic_info_table);
    MPL_free((*win_ptr)->op_pool_start);
    MPL_free((*win_ptr)->target_pool_start);
    MPL_free((*win_ptr)->slots);
    MPL_free((*win_ptr)->target_lock_entry_pool_start);

    MPIR_Assert((*win_ptr)->current_target_lock_data_bytes == 0);

    /* Free the attached buffer for windows created with MPI_Win_allocate() */
    if ((*win_ptr)->create_flavor == MPI_WIN_FLAVOR_ALLOCATE ||
        (*win_ptr)->create_flavor == MPI_WIN_FLAVOR_SHARED) {
        /* shared-memory-backed buffers are owned by the SHM layer, not us */
        if ((*win_ptr)->shm_allocated == FALSE && (*win_ptr)->size > 0) {
            MPL_free((*win_ptr)->base);
        }
    }

    MPIR_Object_release_ref(*win_ptr, &in_use);
    /* MPI windows don't have reference count semantics, so this should always be true */
    MPIR_Assert(!in_use);
    MPIR_Handle_obj_free(&MPIR_Win_mem, *win_ptr);

  fn_exit:
    MPIR_FUNC_VERBOSE_RMA_EXIT(MPID_STATE_MPID_WIN_FREE);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
/* Device-layer implementation of MPI_Compare_and_swap.  For a local or
 * shared-memory-reachable target the CAS is performed directly on the SHM
 * region; otherwise a CAS_IMMED packet (with the origin and compare values
 * copied inline) is queued on the window's RMA ops queue for later issue. */
int MPID_Compare_and_swap(const void *origin_addr, const void *compare_addr,
                          void *result_addr, MPI_Datatype datatype, int target_rank,
                          MPI_Aint target_disp, MPID_Win * win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int rank;
    MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;
    int made_progress = 0;
    MPIDI_STATE_DECL(MPID_STATE_MPID_COMPARE_AND_SWAP);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPID_COMPARE_AND_SWAP);

    MPIR_ERR_CHKANDJUMP(win_ptr->states.access_state == MPIDI_RMA_NONE,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    if (target_rank == MPI_PROC_NULL) {
        goto fn_exit;
    }

    rank = win_ptr->comm_ptr->rank;

    if (win_ptr->shm_allocated == TRUE && target_rank != rank &&
        win_ptr->create_flavor != MPI_WIN_FLAVOR_SHARED) {
        /* check if target is local and shared memory is allocated on window,
         * if so, we directly perform this operation on shared memory region. */

        /* FIXME: Here we decide whether to perform SHM operations by checking if origin and target are on
         * the same node. However, in ch3:sock, even if origin and target are on the same node, they do
         * not within the same SHM region. Here we filter out ch3:sock by checking shm_allocated flag first,
         * which is only set to TRUE when SHM region is allocated in nemesis.
         * In future we need to figure out a way to check if origin and target are in the same "SHM comm".
         */
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, rank, &orig_vc);
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, target_rank, &target_vc);
    }

    /* The datatype must be predefined, and one of: C integer, Fortran integer,
     * Logical, Multi-language types, or Byte.  This is checked above the ADI,
     * so there's no need to check it again here. */

    /* FIXME: For shared memory windows, we should provide an implementation
     * that uses a processor atomic operation. */
    if (target_rank == rank || win_ptr->create_flavor == MPI_WIN_FLAVOR_SHARED ||
        (win_ptr->shm_allocated == TRUE && orig_vc->node_id == target_vc->node_id)) {
        /* local / same-node target: perform the CAS directly via SHM */
        mpi_errno = MPIDI_CH3I_Shm_cas_op(origin_addr, compare_addr, result_addr,
                                          datatype, target_rank, target_disp, win_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
    }
    else {
        MPIDI_RMA_Op_t *op_ptr = NULL;
        MPIDI_CH3_Pkt_cas_t *cas_pkt = NULL;
        MPI_Aint type_size;
        void *src = NULL, *dest = NULL;

        /* Append this operation to the RMA ops queue */
        mpi_errno = MPIDI_CH3I_Win_get_op(win_ptr, &op_ptr);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        MPIR_T_PVAR_TIMER_START(RMA, rma_rmaqueue_set);

        /******************** Setting operation struct areas ***********************/

        op_ptr->origin_addr = (void *) origin_addr;
        op_ptr->origin_count = 1;
        op_ptr->origin_datatype = datatype;
        op_ptr->result_addr = result_addr;
        op_ptr->result_datatype = datatype;
        op_ptr->compare_addr = (void *) compare_addr;
        op_ptr->compare_datatype = datatype;
        op_ptr->target_rank = target_rank;
        op_ptr->piggyback_lock_candidate = 1;   /* CAS is always able to piggyback LOCK */

        /************** Setting packet struct areas in operation ****************/

        cas_pkt = &(op_ptr->pkt.cas);
        MPIDI_Pkt_init(cas_pkt, MPIDI_CH3_PKT_CAS_IMMED);
        /* translate (rank, disp) into the target's absolute address using the
         * cached base/disp_unit from the window's basic-info table */
        cas_pkt->addr = (char *) win_ptr->basic_info_table[target_rank].base_addr +
            win_ptr->basic_info_table[target_rank].disp_unit * target_disp;
        cas_pkt->datatype = datatype;
        cas_pkt->target_win_handle = win_ptr->basic_info_table[target_rank].win_handle;
        cas_pkt->flags = MPIDI_CH3_PKT_FLAG_NONE;

        /* REQUIRE: All datatype arguments must be of the same, builtin
         * type and counts must be 1. */
        MPID_Datatype_get_size_macro(datatype, type_size);
        MPIU_Assert(type_size <= sizeof(MPIDI_CH3_CAS_Immed_u));

        /* copy origin and compare values inline into the packet */
        src = (void *) origin_addr, dest = (void *) (&(cas_pkt->origin_data));
        mpi_errno = immed_copy(src, dest, type_size);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        src = (void *) compare_addr, dest = (void *) (&(cas_pkt->compare_data));
        mpi_errno = immed_copy(src, dest, type_size);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        MPIR_T_PVAR_TIMER_END(RMA, rma_rmaqueue_set);

        mpi_errno = MPIDI_CH3I_Win_enqueue_op(win_ptr, op_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, target_rank, &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* throttle: drain the progress engine while too many active requests
         * are outstanding (threshold < 0 disables throttling) */
        if (MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD >= 0 &&
            MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
            while (MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIR_ERR_POP(mpi_errno);
            }
        }
    }

  fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPID_COMPARE_AND_SWAP);
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
  fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* CH3-level implementation of MPI_Get_accumulate.  A local or same-node
 * target is serviced directly through shared memory; otherwise a
 * GET_ACCUM(_IMMED) packet is built and queued on the window's RMA ops
 * queue.  Large transfers are split into "stream units" sized by
 * MPIDI_CH3U_Acc_stream_size; derived-datatype reference counts are bumped
 * once per stream unit. */
int MPIDI_CH3I_Get_accumulate(const void *origin_addr, int origin_count,
                              MPI_Datatype origin_datatype, void *result_addr, int result_count,
                              MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
                              int target_count, MPI_Datatype target_datatype, MPI_Op op,
                              MPID_Win * win_ptr, MPID_Request * ureq)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_msg_sz_t orig_data_sz, target_data_sz;
    int rank;
    int dt_contig ATTRIBUTE((unused));
    MPI_Aint dt_true_lb ATTRIBUTE((unused));
    MPID_Datatype *dtp;
    MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;
    int made_progress = 0;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_GET_ACCUMULATE);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_GET_ACCUMULATE);

    MPIR_ERR_CHKANDJUMP(win_ptr->states.access_state == MPIDI_RMA_NONE,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    if (target_rank == MPI_PROC_NULL) {
        goto fn_exit;
    }

    MPIDI_Datatype_get_info(target_count, target_datatype, dt_contig, target_data_sz, dtp,
                            dt_true_lb);

    /* zero-size target data: nothing to do */
    if (target_data_sz == 0) {
        goto fn_exit;
    }

    rank = win_ptr->comm_ptr->rank;

    if (win_ptr->shm_allocated == TRUE && target_rank != rank &&
        win_ptr->create_flavor != MPI_WIN_FLAVOR_SHARED) {
        /* check if target is local and shared memory is allocated on window,
         * if so, we directly perform this operation on shared memory region. */

        /* FIXME: Here we decide whether to perform SHM operations by checking if origin and target are on
         * the same node. However, in ch3:sock, even if origin and target are on the same node, they do
         * not within the same SHM region. Here we filter out ch3:sock by checking shm_allocated flag first,
         * which is only set to TRUE when SHM region is allocated in nemesis.
         * In future we need to figure out a way to check if origin and target are in the same "SHM comm".
         */
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, rank, &orig_vc);
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, target_rank, &target_vc);
    }

    /* Do != rank first (most likely branch?) */
    if (target_rank == rank || win_ptr->create_flavor == MPI_WIN_FLAVOR_SHARED ||
        (win_ptr->shm_allocated == TRUE && orig_vc->node_id == target_vc->node_id)) {
        /* local / same-node target: perform the operation directly via SHM */
        mpi_errno = MPIDI_CH3I_Shm_get_acc_op(origin_addr, origin_count, origin_datatype,
                                              result_addr, result_count, result_datatype,
                                              target_rank, target_disp, target_count,
                                              target_datatype, op, win_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        if (ureq) {
            /* Complete user request and release the ch3 ref */
            mpi_errno = MPID_Request_complete(ureq);
            if (mpi_errno != MPI_SUCCESS) {
                MPIR_ERR_POP(mpi_errno);
            }
        }
    }
    else {
        MPIDI_RMA_Op_t *op_ptr = NULL;
        MPIDI_CH3_Pkt_get_accum_t *get_accum_pkt;
        MPI_Aint origin_type_size;
        MPI_Aint target_type_size;
        int use_immed_pkt = FALSE, i;
        int is_origin_contig, is_target_contig, is_result_contig;
        MPI_Aint stream_elem_count, stream_unit_count;
        MPI_Aint predefined_dtp_size, predefined_dtp_count, predefined_dtp_extent;
        MPID_Datatype *origin_dtp = NULL, *target_dtp = NULL, *result_dtp = NULL;
        int is_empty_origin = FALSE;

        /* Judge if origin buffer is empty (MPI_NO_OP reads no origin data) */
        if (op == MPI_NO_OP)
            is_empty_origin = TRUE;

        /* Append the operation to the window's RMA ops queue */
        mpi_errno = MPIDI_CH3I_Win_get_op(win_ptr, &op_ptr);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* TODO: Can we use the MPIDI_RMA_ACC_CONTIG optimization? */

        MPIR_T_PVAR_TIMER_START(RMA, rma_rmaqueue_set);

        /******************** Setting operation struct areas ***********************/

        op_ptr->origin_addr = (void *) origin_addr;
        op_ptr->origin_count = origin_count;
        op_ptr->origin_datatype = origin_datatype;
        op_ptr->result_addr = result_addr;
        op_ptr->result_count = result_count;
        op_ptr->result_datatype = result_datatype;
        op_ptr->target_rank = target_rank;

        /* Remember user request */
        op_ptr->ureq = ureq;

        /* if source or target datatypes are derived, increment their
         * reference counts */
        if (is_empty_origin == FALSE && !MPIR_DATATYPE_IS_PREDEFINED(origin_datatype)) {
            MPID_Datatype_get_ptr(origin_datatype, origin_dtp);
        }
        if (!MPIR_DATATYPE_IS_PREDEFINED(result_datatype)) {
            MPID_Datatype_get_ptr(result_datatype, result_dtp);
        }
        if (!MPIR_DATATYPE_IS_PREDEFINED(target_datatype)) {
            MPID_Datatype_get_ptr(target_datatype, target_dtp);
        }

        if (is_empty_origin == FALSE) {
            MPID_Datatype_get_size_macro(origin_datatype, origin_type_size);
            MPIU_Assign_trunc(orig_data_sz, origin_count * origin_type_size, MPIDI_msg_sz_t);
        }
        else {
            /* If origin buffer is empty, set origin data size to 0 */
            orig_data_sz = 0;
        }

        MPID_Datatype_get_size_macro(target_datatype, target_type_size);

        /* Get size and count for predefined datatype elements */
        if (MPIR_DATATYPE_IS_PREDEFINED(target_datatype)) {
            predefined_dtp_size = target_type_size;
            predefined_dtp_count = target_count;
            MPID_Datatype_get_extent_macro(target_datatype, predefined_dtp_extent);
        }
        else {
            /* for a derived target type, stream in units of its basic type */
            MPIU_Assert(target_dtp->basic_type != MPI_DATATYPE_NULL);
            MPID_Datatype_get_size_macro(target_dtp->basic_type, predefined_dtp_size);
            predefined_dtp_count = target_data_sz / predefined_dtp_size;
            MPID_Datatype_get_extent_macro(target_dtp->basic_type, predefined_dtp_extent);
        }
        MPIU_Assert(predefined_dtp_count > 0 && predefined_dtp_size > 0 &&
                    predefined_dtp_extent > 0);

        /* Calculate number of predefined elements in each stream unit, and
         * total number of stream units. */
        stream_elem_count = MPIDI_CH3U_Acc_stream_size / predefined_dtp_extent;
        stream_unit_count = (predefined_dtp_count - 1) / stream_elem_count + 1;
        MPIU_Assert(stream_elem_count > 0 && stream_unit_count > 0);

        /* one datatype reference per stream unit -- presumably each unit's
         * completion path releases one reference (TODO confirm against the
         * completion handlers) */
        for (i = 0; i < stream_unit_count; i++) {
            if (origin_dtp != NULL) {
                MPID_Datatype_add_ref(origin_dtp);
            }
            if (target_dtp != NULL) {
                MPID_Datatype_add_ref(target_dtp);
            }
            if (result_dtp != NULL) {
                MPID_Datatype_add_ref(result_dtp);
            }
        }

        if (is_empty_origin == FALSE) {
            MPID_Datatype_is_contig(origin_datatype, &is_origin_contig);
        }
        else {
            /* If origin buffer is empty, mark origin data as contig data */
            is_origin_contig = 1;
        }
        MPID_Datatype_is_contig(target_datatype, &is_target_contig);
        MPID_Datatype_is_contig(result_datatype, &is_result_contig);

        /* Judge if we can use IMMED data packet */
        if ((is_empty_origin == TRUE || MPIR_DATATYPE_IS_PREDEFINED(origin_datatype)) &&
            MPIR_DATATYPE_IS_PREDEFINED(result_datatype) &&
            MPIR_DATATYPE_IS_PREDEFINED(target_datatype) &&
            is_origin_contig && is_target_contig && is_result_contig) {
            if (target_data_sz <= MPIDI_RMA_IMMED_BYTES)
                use_immed_pkt = TRUE;
        }

        /* Judge if this operation is a piggyback candidate */
        if ((is_empty_origin == TRUE || MPIR_DATATYPE_IS_PREDEFINED(origin_datatype)) &&
            MPIR_DATATYPE_IS_PREDEFINED(result_datatype) &&
            MPIR_DATATYPE_IS_PREDEFINED(target_datatype)) {
            /* FIXME: currently we only piggyback LOCK flag with op using predefined datatypes
             * for origin, target and result data. We should extend this optimization to derived
             * datatypes as well. */
            if (orig_data_sz <= MPIR_CVAR_CH3_RMA_OP_PIGGYBACK_LOCK_DATA_SIZE)
                op_ptr->piggyback_lock_candidate = 1;
        }

        /************** Setting packet struct areas in operation ****************/

        get_accum_pkt = &(op_ptr->pkt.get_accum);

        if (use_immed_pkt) {
            MPIDI_Pkt_init(get_accum_pkt, MPIDI_CH3_PKT_GET_ACCUM_IMMED);
        }
        else {
            MPIDI_Pkt_init(get_accum_pkt, MPIDI_CH3_PKT_GET_ACCUM);
        }
        /* translate (rank, disp) into the target's absolute address using the
         * cached base/disp_unit from the window's basic-info table */
        get_accum_pkt->addr = (char *) win_ptr->basic_info_table[target_rank].base_addr +
            win_ptr->basic_info_table[target_rank].disp_unit * target_disp;
        get_accum_pkt->count = target_count;
        get_accum_pkt->datatype = target_datatype;
        get_accum_pkt->info.dataloop_size = 0;
        get_accum_pkt->op = op;
        get_accum_pkt->target_win_handle = win_ptr->basic_info_table[target_rank].win_handle;
        get_accum_pkt->flags = MPIDI_CH3_PKT_FLAG_NONE;
        if (use_immed_pkt) {
            void *src = (void *) origin_addr, *dest = (void *) (get_accum_pkt->info.data);
            mpi_errno = immed_copy(src, dest, orig_data_sz);
            if (mpi_errno != MPI_SUCCESS)
                MPIR_ERR_POP(mpi_errno);
        }

        MPIR_T_PVAR_TIMER_END(RMA, rma_rmaqueue_set);

        mpi_errno = MPIDI_CH3I_Win_enqueue_op(win_ptr, op_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, target_rank, &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* throttle: drain the progress engine while too many active requests
         * are outstanding (threshold < 0 disables throttling) */
        if (MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD >= 0 &&
            MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
            while (MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIR_ERR_POP(mpi_errno);
            }
        }
    }

  fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_GET_ACCUMULATE);
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
  fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* CH3-level implementation of MPI_Put.  A local or same-node target is
 * serviced directly through shared memory; otherwise a PUT(_IMMED) packet
 * is built and queued on the window's RMA ops queue, with short contiguous
 * payloads copied inline into the packet. */
int MPIDI_CH3I_Put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
                   int target_rank, MPI_Aint target_disp, int target_count,
                   MPI_Datatype target_datatype, MPID_Win * win_ptr, MPID_Request * ureq)
{
    int mpi_errno = MPI_SUCCESS;
    int dt_contig ATTRIBUTE((unused)), rank;
    MPID_Datatype *dtp;
    MPI_Aint dt_true_lb ATTRIBUTE((unused));
    MPIDI_msg_sz_t data_sz;
    MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;
    int made_progress = 0;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_PUT);

    MPIDI_RMA_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_PUT);

    MPIR_ERR_CHKANDJUMP(win_ptr->states.access_state == MPIDI_RMA_NONE,
                        mpi_errno, MPI_ERR_RMA_SYNC, "**rmasync");

    if (target_rank == MPI_PROC_NULL) {
        goto fn_exit;
    }

    MPIDI_Datatype_get_info(origin_count, origin_datatype, dt_contig, data_sz, dtp, dt_true_lb);

    /* zero-size transfer: nothing to do */
    if (data_sz == 0) {
        goto fn_exit;
    }

    rank = win_ptr->comm_ptr->rank;

    if (win_ptr->shm_allocated == TRUE && target_rank != rank &&
        win_ptr->create_flavor != MPI_WIN_FLAVOR_SHARED) {
        /* check if target is local and shared memory is allocated on window,
         * if so, we directly perform this operation on shared memory region. */

        /* FIXME: Here we decide whether to perform SHM operations by checking if origin and target are on
         * the same node. However, in ch3:sock, even if origin and target are on the same node, they do
         * not within the same SHM region. Here we filter out ch3:sock by checking shm_allocated flag first,
         * which is only set to TRUE when SHM region is allocated in nemesis.
         * In future we need to figure out a way to check if origin and target are in the same "SHM comm".
         */
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, rank, &orig_vc);
        MPIDI_Comm_get_vc(win_ptr->comm_ptr, target_rank, &target_vc);
    }

    /* If the put is a local operation, do it here */
    if (target_rank == rank || win_ptr->create_flavor == MPI_WIN_FLAVOR_SHARED ||
        (win_ptr->shm_allocated == TRUE && orig_vc->node_id == target_vc->node_id)) {
        mpi_errno = MPIDI_CH3I_Shm_put_op(origin_addr, origin_count, origin_datatype, target_rank,
                                          target_disp, target_count, target_datatype, win_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        if (ureq) {
            /* Complete user request and release the ch3 ref */
            mpi_errno = MPID_Request_complete(ureq);
            if (mpi_errno != MPI_SUCCESS) {
                MPIR_ERR_POP(mpi_errno);
            }
        }
    }
    else {
        MPIDI_RMA_Op_t *op_ptr = NULL;
        MPIDI_CH3_Pkt_put_t *put_pkt = NULL;
        int use_immed_pkt = FALSE;
        int is_origin_contig, is_target_contig;

        /* queue it up */
        mpi_errno = MPIDI_CH3I_Win_get_op(win_ptr, &op_ptr);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        MPIR_T_PVAR_TIMER_START(RMA, rma_rmaqueue_set);

        /******************** Setting operation struct areas ***********************/
        /* FIXME: For contig and very short operations, use a streamlined op */
        op_ptr->origin_addr = (void *) origin_addr;
        op_ptr->origin_count = origin_count;
        op_ptr->origin_datatype = origin_datatype;
        op_ptr->target_rank = target_rank;

        /* Remember user request */
        op_ptr->ureq = ureq;

        /* if source or target datatypes are derived, increment their
         * reference counts */
        if (!MPIR_DATATYPE_IS_PREDEFINED(origin_datatype)) {
            MPID_Datatype_get_ptr(origin_datatype, dtp);
            MPID_Datatype_add_ref(dtp);
        }
        if (!MPIR_DATATYPE_IS_PREDEFINED(target_datatype)) {
            MPID_Datatype_get_ptr(target_datatype, dtp);
            MPID_Datatype_add_ref(dtp);
        }

        MPID_Datatype_is_contig(origin_datatype, &is_origin_contig);
        MPID_Datatype_is_contig(target_datatype, &is_target_contig);

        /* Judge if we can use IMMED data packet */
        if (MPIR_DATATYPE_IS_PREDEFINED(origin_datatype) &&
            MPIR_DATATYPE_IS_PREDEFINED(target_datatype) && is_origin_contig && is_target_contig) {
            if (data_sz <= MPIDI_RMA_IMMED_BYTES)
                use_immed_pkt = TRUE;
        }

        /* Judge if this operation is an piggyback candidate */
        if (MPIR_DATATYPE_IS_PREDEFINED(origin_datatype) &&
            MPIR_DATATYPE_IS_PREDEFINED(target_datatype)) {
            /* FIXME: currently we only piggyback LOCK flag with op using predefined datatypes
             * for both origin and target data. We should extend this optimization to derived
             * datatypes as well. */
            if (data_sz <= MPIR_CVAR_CH3_RMA_OP_PIGGYBACK_LOCK_DATA_SIZE)
                op_ptr->piggyback_lock_candidate = 1;
        }

        /************** Setting packet struct areas in operation ****************/

        put_pkt = &(op_ptr->pkt.put);

        if (use_immed_pkt) {
            MPIDI_Pkt_init(put_pkt, MPIDI_CH3_PKT_PUT_IMMED);
        }
        else {
            MPIDI_Pkt_init(put_pkt, MPIDI_CH3_PKT_PUT);
        }
        /* translate (rank, disp) into the target's absolute address using the
         * cached base/disp_unit from the window's basic-info table */
        put_pkt->addr = (char *) win_ptr->basic_info_table[target_rank].base_addr +
            win_ptr->basic_info_table[target_rank].disp_unit * target_disp;
        put_pkt->count = target_count;
        put_pkt->datatype = target_datatype;
        put_pkt->info.dataloop_size = 0;
        put_pkt->target_win_handle = win_ptr->basic_info_table[target_rank].win_handle;
        put_pkt->source_win_handle = win_ptr->handle;
        put_pkt->flags = MPIDI_CH3_PKT_FLAG_NONE;
        if (use_immed_pkt) {
            void *src = (void *) origin_addr, *dest = (void *) (put_pkt->info.data);
            mpi_errno = immed_copy(src, dest, data_sz);
            if (mpi_errno != MPI_SUCCESS)
                MPIR_ERR_POP(mpi_errno);
        }

        MPIR_T_PVAR_TIMER_END(RMA, rma_rmaqueue_set);

        mpi_errno = MPIDI_CH3I_Win_enqueue_op(win_ptr, op_ptr);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);

        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, target_rank, &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);

        /* throttle: drain the progress engine while too many active requests
         * are outstanding (threshold < 0 disables throttling) */
        if (MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD >= 0 &&
            MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
            while (MPIDI_CH3I_RMA_Active_req_cnt >= MPIR_CVAR_CH3_RMA_ACTIVE_REQ_THRESHOLD) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIR_ERR_POP(mpi_errno);
            }
        }
    }

  fn_exit:
    MPIDI_RMA_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_PUT);
    return mpi_errno;
    /* --BEGIN ERROR HANDLING-- */
  fn_fail:
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/* Aggressively free a target element for reuse (MPID_Win variant): pick a
 * target from a non-empty slot, force a full (remote) FLUSH on it, wait for
 * remote completion, clean it up, and retry until
 * MPIDI_CH3I_Win_target_alloc succeeds.  On success (*target) points at a
 * freshly allocated target element.  Returns MPI_SUCCESS or an MPI error. */
int MPIDI_CH3I_RMA_Cleanup_target_aggressive(MPID_Win * win_ptr, MPIDI_RMA_Target_t ** target)
{
    int i, local_completed = 0, remote_completed = 0;
    int made_progress = 0;
    MPIDI_RMA_Target_t *curr_target = NULL;
    int mpi_errno = MPI_SUCCESS;

    (*target) = NULL;

    /* If we are in an aggressive cleanup, the window must be holding
     * up resources.  If it isn't, we are in the wrong window and
     * incorrectly entered this function. */
    MPIU_ERR_CHKANDJUMP(win_ptr->non_empty_slots == 0, mpi_errno, MPI_ERR_OTHER, "**rmanotarget");

    if (win_ptr->states.access_state == MPIDI_RMA_LOCK_ALL_CALLED) {
        /* switch to window-wide protocol: issue a shared lock to every
         * off-node process that does not already have a target element */
        MPIDI_VC_t *orig_vc = NULL, *target_vc = NULL;

        MPIDI_Comm_get_vc(win_ptr->comm_ptr, win_ptr->comm_ptr->rank, &orig_vc);
        for (i = 0; i < win_ptr->comm_ptr->local_size; i++) {
            if (i == win_ptr->comm_ptr->rank)
                continue;
            MPIDI_Comm_get_vc(win_ptr->comm_ptr, i, &target_vc);
            if (orig_vc->node_id != target_vc->node_id) {
                mpi_errno = MPIDI_CH3I_Win_find_target(win_ptr, i, &curr_target);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);
                if (curr_target == NULL) {
                    win_ptr->outstanding_locks++;
                    mpi_errno = send_lock_msg(i, MPI_LOCK_SHARED, win_ptr);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIU_ERR_POP(mpi_errno);
                }
            }
        }
        win_ptr->states.access_state = MPIDI_RMA_LOCK_ALL_ISSUED;
    }

    do {
        /* find a non-empty slot and set the FLUSH flag on the first
         * target */
        /* TODO: we should think about better strategies on selecting the target */
        for (i = 0; i < win_ptr->num_slots; i++)
            if (win_ptr->slots[i].target_list != NULL)
                break;
        curr_target = win_ptr->slots[i].target_list;

        if (curr_target->sync.sync_flag < MPIDI_RMA_SYNC_FLUSH) {
            /* upgrade to a remote flush and expect one more ACK for it */
            curr_target->sync.sync_flag = MPIDI_RMA_SYNC_FLUSH;
            curr_target->sync.have_remote_incomplete_ops = 0;
            curr_target->sync.outstanding_acks++;
        }

        /* Issue out all operations. */
        mpi_errno = MPIDI_CH3I_RMA_Make_progress_target(win_ptr, curr_target->target_rank,
                                                        &made_progress);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);

        /* Wait for remote completion, polling the progress engine. */
        do {
            mpi_errno = MPIDI_CH3I_RMA_Cleanup_ops_target(win_ptr, curr_target,
                                                          &local_completed, &remote_completed);
            if (mpi_errno != MPI_SUCCESS)
                MPIU_ERR_POP(mpi_errno);
            if (!remote_completed) {
                mpi_errno = wait_progress_engine();
                if (mpi_errno != MPI_SUCCESS)
                    MPIU_ERR_POP(mpi_errno);
            }
        } while (!remote_completed);

        /* Cleanup the target. */
        mpi_errno = MPIDI_CH3I_RMA_Cleanup_single_target(win_ptr, curr_target);
        if (mpi_errno != MPI_SUCCESS)
            MPIU_ERR_POP(mpi_errno);

        /* check if we got a target */
        (*target) = MPIDI_CH3I_Win_target_alloc(win_ptr);

    } while ((*target) == NULL);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}