int MPIDI_RMA_init(void) { int mpi_errno = MPI_SUCCESS; int i; MPIR_CHKPMEM_DECL(3); MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_MPIDI_RMA_INIT); MPIR_FUNC_VERBOSE_RMA_ENTER(MPID_STATE_MPIDI_RMA_INIT); MPIR_CHKPMEM_MALLOC(global_rma_op_pool_start, MPIDI_RMA_Op_t *, sizeof(MPIDI_RMA_Op_t) * MPIR_CVAR_CH3_RMA_OP_GLOBAL_POOL_SIZE, mpi_errno, "RMA op pool"); for (i = 0; i < MPIR_CVAR_CH3_RMA_OP_GLOBAL_POOL_SIZE; i++) { global_rma_op_pool_start[i].pool_type = MPIDI_RMA_POOL_GLOBAL; MPL_DL_APPEND(global_rma_op_pool_head, &(global_rma_op_pool_start[i])); } MPIR_CHKPMEM_MALLOC(global_rma_target_pool_start, MPIDI_RMA_Target_t *, sizeof(MPIDI_RMA_Target_t) * MPIR_CVAR_CH3_RMA_TARGET_GLOBAL_POOL_SIZE, mpi_errno, "RMA target pool"); for (i = 0; i < MPIR_CVAR_CH3_RMA_TARGET_GLOBAL_POOL_SIZE; i++) { global_rma_target_pool_start[i].pool_type = MPIDI_RMA_POOL_GLOBAL; MPL_DL_APPEND(global_rma_target_pool_head, &(global_rma_target_pool_start[i])); } fn_exit: MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_MPIDI_RMA_INIT); return mpi_errno; fn_fail: MPIR_CHKPMEM_REAP(); goto fn_fail; }
static int rptl_put(ptl_handle_md_t md_handle, ptl_size_t local_offset, ptl_size_t length, ptl_ack_req_t ack_req, ptl_process_t target_id, ptl_pt_index_t pt_index, ptl_match_bits_t match_bits, ptl_size_t remote_offset, void *user_ptr, ptl_hdr_data_t hdr_data, enum rptl_pt_type pt_type) { struct rptl_op *op; int ret = PTL_OK; struct rptl_target *target; MPIDI_STATE_DECL(MPID_STATE_RPTL_PUT); MPIDI_FUNC_ENTER(MPID_STATE_RPTL_PUT); ret = find_target(target_id, &target); RPTLU_ERR_POP(ret, "error finding target structure\n"); ret = rptli_op_alloc(&op, target); RPTLU_ERR_POP(ret, "error allocating op\n"); op->op_type = RPTL_OP_PUT; op->state = RPTL_OP_STATE_QUEUED; /* store the user parameters */ op->u.put.md_handle = md_handle; op->u.put.local_offset = local_offset; op->u.put.length = length; op->u.put.ack_req = ack_req; op->u.put.target_id = target_id; op->u.put.pt_index = pt_index; op->u.put.match_bits = match_bits; op->u.put.remote_offset = remote_offset; op->u.put.user_ptr = user_ptr; op->u.put.hdr_data = hdr_data; /* place to store the send and ack events */ op->u.put.send = NULL; op->u.put.ack = NULL; op->u.put.pt_type = pt_type; op->events_ready = 0; op->target = target; if (op->u.put.pt_type == RPTL_PT_DATA) MPL_DL_APPEND(target->data_op_list, op); else MPL_DL_APPEND(target->control_op_list, op); ret = poke_progress(); RPTLU_ERR_POP(ret, "Error from poke_progress\n"); fn_exit: MPIDI_FUNC_EXIT(MPID_STATE_RPTL_PUT); return ret; fn_fail: goto fn_exit; }
/* find_target - Return (via *target) the rptl_target bookkeeping structure
 * for portals process "id", creating one and appending it to the global
 * target list if this is the first operation aimed at that process.
 *
 * NOTE(review): this chunk of the file is truncated here -- the tail of the
 * function (which presumably stores t into *target, commits/reaps the
 * CHKPMEM allocation, and returns) is not visible; only comments were added.
 */
static int find_target(ptl_process_t id, struct rptl_target **target)
{
    int mpi_errno = MPI_SUCCESS;
    int ret = PTL_OK;
    struct rptl_target *t;
    MPIU_CHKPMEM_DECL(1);
    MPIDI_STATE_DECL(MPID_STATE_FIND_TARGET);

    MPIDI_FUNC_ENTER(MPID_STATE_FIND_TARGET);

    /* linear scan of the global target list for a matching process id */
    for (t = rptl_info.target_list; t; t = t->next)
        if (IDS_ARE_EQUAL(t->id, id))
            break;

    /* if the target does not already exist, create one */
    if (t == NULL) {
        MPIU_CHKPMEM_MALLOC(t, struct rptl_target *, sizeof(struct rptl_target), mpi_errno,
                            "rptl target");
        MPL_DL_APPEND(rptl_info.target_list, t);

        /* fresh target starts active with empty op lists/pools */
        t->id = id;
        t->state = RPTL_TARGET_STATE_ACTIVE;
        t->rptl = NULL;
        t->op_segment_list = NULL;
        t->op_pool = NULL;
        t->data_op_list = NULL;
        t->control_op_list = NULL;
        t->issued_data_ops = 0;
    }
/* MPID_nem_ptl_rptl_ptinit - Create and initialize an rptl object wrapping a
 * pair of portal table entries (a data portal and an optional control
 * portal), bind a memory descriptor spanning all of memory, and pre-post
 * control buffers.
 *
 * NOTE(review): this chunk of the file is truncated here -- the function's
 * exit labels and return are not visible; only comments were added.
 */
int MPID_nem_ptl_rptl_ptinit(ptl_handle_ni_t ni_handle, ptl_handle_eq_t eq_handle,
                             ptl_pt_index_t data_pt, ptl_pt_index_t control_pt)
{
    int ret = PTL_OK;
    struct rptl *rptl;
    int mpi_errno = MPI_SUCCESS;
    int i;
    ptl_md_t md;
    MPIU_CHKPMEM_DECL(2);
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_RPTL_PTINIT);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_RPTL_PTINIT);

    /* setup the parts of rptls that can be done before world size or
     * target information */
    MPIU_CHKPMEM_MALLOC(rptl, struct rptl *, sizeof(struct rptl), mpi_errno, "rptl");
    MPL_DL_APPEND(rptl_info.rptl_list, rptl);

    rptl->local_state = RPTL_LOCAL_STATE_ACTIVE;
    rptl->pause_ack_counter = 0;

    /* origin-side buffering counters start empty */
    rptl->data.ob_max_count = 0;
    rptl->data.ob_curr_count = 0;

    rptl->data.pt = data_pt;
    rptl->control.pt = control_pt;

    rptl->ni = ni_handle;
    rptl->eq = eq_handle;

    /* bind a single MD covering the whole address space (start 0,
     * maximum length) so any local buffer can be used without a
     * per-operation bind */
    md.start = 0;
    md.length = (ptl_size_t) (-1);
    md.options = 0x0;
    md.eq_handle = rptl->eq;
    md.ct_handle = PTL_CT_NONE;
    ret = PtlMDBind(rptl->ni, &md, &rptl->md);
    RPTLU_ERR_POP(ret, "Error binding new global MD\n");

    /* post world_size number of empty buffers on the control portal */
    if (rptl->control.pt != PTL_PT_ANY) {
        /* 2 * world_size ME handles: one set in use, one set spare */
        MPIU_CHKPMEM_MALLOC(rptl->control.me, ptl_handle_me_t *,
                            2 * rptl_info.world_size * sizeof(ptl_handle_me_t), mpi_errno,
                            "rptl target info");
        for (i = 0; i < 2 * rptl_info.world_size; i++) {
            ret = rptli_post_control_buffer(rptl->ni, rptl->control.pt, &rptl->control.me[i]);
            RPTLU_ERR_POP(ret, "Error in rptli_post_control_buffer\n");
        }
        rptl->control.me_idx = 0;
    }
/* MPID_nem_ptl_rptl_get - Queue a GET operation for the given portals
 * process.
 *
 * The caller's PtlGet arguments are stashed in a freshly allocated op
 * element, the op is appended to the target's data queue, and the progress
 * engine is poked so the op can be issued.  Returns PTL_OK or a portals
 * error code.
 */
int MPID_nem_ptl_rptl_get(ptl_handle_md_t md_handle, ptl_size_t local_offset, ptl_size_t length,
                          ptl_process_t target_id, ptl_pt_index_t pt_index,
                          ptl_match_bits_t match_bits, ptl_size_t remote_offset, void *user_ptr)
{
    int ret = PTL_OK;
    struct rptl_target *tgt = NULL;
    struct rptl_op *get_op = NULL;
    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_PTL_RPTL_GET);

    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_PTL_RPTL_GET);

    /* look up (or create) the per-target bookkeeping structure */
    ret = find_target(target_id, &tgt);
    RPTLU_ERR_POP(ret, "error finding target structure\n");

    /* grab a free op element from the target's pool */
    ret = rptli_op_alloc(&get_op, tgt);
    RPTLU_ERR_POP(ret, "error allocating op\n");

    get_op->op_type = RPTL_OP_GET;
    get_op->state = RPTL_OP_STATE_QUEUED;
    get_op->events_ready = 0;
    get_op->target = tgt;

    /* record the caller's arguments so the op can be issued later */
    get_op->u.get.md_handle = md_handle;
    get_op->u.get.local_offset = local_offset;
    get_op->u.get.length = length;
    get_op->u.get.target_id = target_id;
    get_op->u.get.pt_index = pt_index;
    get_op->u.get.match_bits = match_bits;
    get_op->u.get.remote_offset = remote_offset;
    get_op->u.get.user_ptr = user_ptr;

    /* GET operations always travel on the data queue */
    MPL_DL_APPEND(tgt->data_op_list, get_op);

    ret = poke_progress();
    RPTLU_ERR_POP(ret, "Error from poke_progress\n");

  fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_PTL_RPTL_GET);
    return ret;
  fn_fail:
    goto fn_exit;
}
/* MPIR_T_pvar_handle_alloc_impl - Allocate an MPI_T performance-variable
 * handle for pvar #pvar_index in the given session.
 *
 * session:    session the new handle is linked into
 * pvar_index: index into the global pvar_table
 * obj_handle: opaque MPI-object handle the pvar is bound to (may be NULL)
 * handle:     [out] newly allocated handle
 * count:      [out] number of elements the pvar holds
 *
 * SUM-class pvars (counter/aggregate/timer) get three extra cache buffers
 * (accum, offset, current) carved out of the same allocation; WATERMARK
 * pvars are linked to their watermark storage.  Returns MPI_SUCCESS or an
 * MPI error code (allocation is reaped on failure).
 */
int MPIR_T_pvar_handle_alloc_impl(MPI_T_pvar_session session, int pvar_index,
                                  void *obj_handle, MPI_T_pvar_handle * handle, int *count)
{
    int mpi_errno = MPI_SUCCESS;
    int cnt, extra, bytes;
    int is_sum, is_watermark;
    const pvar_table_entry_t *info;
    MPIR_T_pvar_handle_t *hnd;
    MPIR_CHKPMEM_DECL(1);

    info = (pvar_table_entry_t *) utarray_eltptr(pvar_table, pvar_index);

    /* element count is either static or provided by a callback */
    if (info->get_count == NULL) {
        cnt = info->count;
    }
    else {
        info->get_count(info->addr, obj_handle, &cnt);
    }

    bytes = MPID_Datatype_get_basic_size(info->datatype);
    is_sum = FALSE;
    is_watermark = FALSE;
    extra = 0;

    if (info->varclass == MPI_T_PVAR_CLASS_COUNTER ||
        info->varclass == MPI_T_PVAR_CLASS_AGGREGATE || info->varclass == MPI_T_PVAR_CLASS_TIMER) {
        /* Extra memory for accum, offset, current */
        is_sum = TRUE;
        extra = bytes * cnt * 3;
    }
    else if (info->varclass == MPI_T_PVAR_CLASS_HIGHWATERMARK ||
             info->varclass == MPI_T_PVAR_CLASS_LOWWATERMARK) {
        is_watermark = TRUE;
    }

    /* Allocate memory and bzero it */
    MPIR_CHKPMEM_CALLOC(hnd, MPIR_T_pvar_handle_t *, sizeof(*hnd) + extra, mpi_errno,
                        "performance variable handle");
#ifdef HAVE_ERROR_CHECKING
    hnd->kind = MPIR_T_PVAR_HANDLE;
#endif

    /* Setup the common fields */
    hnd->addr = info->addr;
    hnd->datatype = info->datatype;
    hnd->count = cnt;
    hnd->varclass = info->varclass;
    hnd->flags = info->flags;
    hnd->session = session;
    hnd->info = info;
    hnd->obj_handle = obj_handle;
    hnd->get_value = info->get_value;
    hnd->bytes = bytes;

    /* BUG FIX: these class bits must be OR-ed in AFTER hnd->flags is seeded
     * from info->flags.  The original code set them first and then overwrote
     * them with "hnd->flags = info->flags", so every MPIR_T_pvar_is_sum /
     * is_watermark test below was false and the SUM cache pointers and
     * WATERMARK linkage were never set up. */
    if (is_sum)
        hnd->flags |= MPIR_T_PVAR_FLAG_SUM;
    else if (is_watermark)
        hnd->flags |= MPIR_T_PVAR_FLAG_WATERMARK;

    /* Init pointers to cache buffers for a SUM (laid out right after the
     * handle struct in the same allocation) */
    if (MPIR_T_pvar_is_sum(hnd)) {
        hnd->accum = (char *) (hnd) + sizeof(*hnd);
        hnd->offset = (char *) (hnd) + sizeof(*hnd) + bytes * cnt;
        hnd->current = (char *) (hnd) + sizeof(*hnd) + bytes * cnt * 2;
    }

    if (MPIR_T_pvar_is_continuous(hnd))
        MPIR_T_pvar_set_started(hnd);

    /* Set starting value of a continuous SUM */
    if (MPIR_T_pvar_is_continuous(hnd) && MPIR_T_pvar_is_sum(hnd)) {
        /* Cache current value of a SUM in offset.
         * accum is zero since we called CALLOC before. */
        if (hnd->get_value == NULL)
            MPIR_Memcpy(hnd->offset, hnd->addr, bytes * cnt);
        else
            hnd->get_value(hnd->addr, hnd->obj_handle, hnd->count, hnd->offset);
    }

    /* Link a WATERMARK handle to its pvar & set starting value if continuous */
    if (MPIR_T_pvar_is_watermark(hnd)) {
        MPIR_T_pvar_watermark_t *mark = (MPIR_T_pvar_watermark_t *) hnd->addr;
        if (!mark->first_used) {
            /* Use the special handle slot for optimization if available */
            mark->first_used = TRUE;
            MPIR_T_pvar_set_first(hnd);

            /* Set starting value */
            if (MPIR_T_pvar_is_continuous(hnd)) {
                mark->first_started = TRUE;
                mark->watermark = mark->current;
            }
            else {
                mark->first_started = FALSE;
            }
        }
        else {
            /* If the special handle slot is unavailable, link it to hlist */
            if (mark->hlist == NULL) {
                hnd->prev2 = hnd;
                mark->hlist = hnd;
            }
            else {
                hnd->prev2 = hnd;
                hnd->next2 = mark->hlist;
                mark->hlist->prev2 = hnd;
                mark->hlist = hnd;
            }

            /* Set starting value */
            if (MPIR_T_pvar_is_continuous(hnd))
                hnd->watermark = mark->current;
        }
    }

    /* Link the handle in its session and return it */
    MPL_DL_APPEND(session->hlist, hnd);
    *handle = hnd;
    *count = cnt;

    MPIR_CHKPMEM_COMMIT();
  fn_exit:
    return mpi_errno;
  fn_fail:
    MPIR_CHKPMEM_REAP();
    goto fn_exit;
}
/* win_init - Common initialization for a new RMA window object, shared by
 * all window-creation flavors.
 *
 * Allocates the MPIR_Win object, dups the user communicator, initializes
 * all window fields and info hints, allocates the per-window op/target/
 * slot/lock-entry pools, registers the RMA progress hook for the first
 * window, places the window on the inactive list, and finally invokes the
 * channel's win_init hook.  On failure all CHKPMEM allocations are reaped.
 */
static int win_init(MPI_Aint size, int disp_unit, int create_flavor, int model, MPIR_Info * info,
                    MPIR_Comm * comm_ptr, MPIR_Win ** win_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    int i;
    MPIR_Comm *win_comm_ptr;
    int win_target_pool_size;
    /* NOTE(review): DECL(5) but only 4 CHKPMEM allocations are visible below;
     * presumably harmless over-declaration -- confirm against the macro. */
    MPIR_CHKPMEM_DECL(5);
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_WIN_INIT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_WIN_INIT);

    /* one-time registration of RMA pvars, guarded by the global mutex */
    MPID_THREAD_CS_ENTER(POBJ, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    if (initRMAoptions) {
        MPIDI_CH3_RMA_Init_sync_pvars();
        MPIDI_CH3_RMA_Init_pkthandler_pvars();
        initRMAoptions = 0;
    }
    MPID_THREAD_CS_EXIT(POBJ, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);

    *win_ptr = (MPIR_Win *) MPIR_Handle_obj_alloc(&MPIR_Win_mem);
    MPIR_ERR_CHKANDJUMP1(!(*win_ptr), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s",
                         "MPIR_Win_mem");

    /* the window gets its own private dup of the user's communicator */
    mpi_errno = MPIR_Comm_dup_impl(comm_ptr, &win_comm_ptr);
    if (mpi_errno)
        MPIR_ERR_POP(mpi_errno);

    MPIR_Object_set_ref(*win_ptr, 1);

    /* (*win_ptr)->errhandler is set by upper level; */
    /* (*win_ptr)->base is set by caller; */
    (*win_ptr)->size = size;
    (*win_ptr)->disp_unit = disp_unit;
    (*win_ptr)->create_flavor = create_flavor;
    (*win_ptr)->model = model;
    (*win_ptr)->attributes = NULL;
    (*win_ptr)->comm_ptr = win_comm_ptr;
    (*win_ptr)->at_completion_counter = 0;
    (*win_ptr)->shm_base_addrs = NULL;
    /* (*win_ptr)->basic_info_table[] is set by caller; */
    (*win_ptr)->current_lock_type = MPID_LOCK_NONE;
    (*win_ptr)->shared_lock_ref_cnt = 0;
    (*win_ptr)->target_lock_queue_head = NULL;
    (*win_ptr)->shm_allocated = FALSE;
    (*win_ptr)->states.access_state = MPIDI_RMA_NONE;
    (*win_ptr)->states.exposure_state = MPIDI_RMA_NONE;
    (*win_ptr)->num_targets_with_pending_net_ops = 0;
    (*win_ptr)->start_ranks_in_win_grp = NULL;
    (*win_ptr)->start_grp_size = 0;
    (*win_ptr)->lock_all_assert = 0;
    (*win_ptr)->lock_epoch_count = 0;
    (*win_ptr)->outstanding_locks = 0;
    (*win_ptr)->current_target_lock_data_bytes = 0;
    (*win_ptr)->sync_request_cnt = 0;
    (*win_ptr)->active = FALSE;
    (*win_ptr)->next = NULL;
    (*win_ptr)->prev = NULL;
    (*win_ptr)->outstanding_acks = 0;

    /* Initialize the info flags to their defaults before applying the
     * user-supplied info object */
    (*win_ptr)->info_args.no_locks = 0;
    (*win_ptr)->info_args.accumulate_ordering =
        MPIDI_ACC_ORDER_RAR | MPIDI_ACC_ORDER_RAW | MPIDI_ACC_ORDER_WAR | MPIDI_ACC_ORDER_WAW;
    (*win_ptr)->info_args.accumulate_ops = MPIDI_ACC_OPS_SAME_OP_NO_OP;
    (*win_ptr)->info_args.same_size = 0;
    (*win_ptr)->info_args.alloc_shared_noncontig = 0;
    (*win_ptr)->info_args.alloc_shm = FALSE;

    /* Set info_args on window based on info provided by user */
    mpi_errno = MPID_Win_set_info((*win_ptr), info);
    if (mpi_errno != MPI_SUCCESS)
        MPIR_ERR_POP(mpi_errno);

    /* per-window free pool of RMA operation elements */
    MPIR_CHKPMEM_MALLOC((*win_ptr)->op_pool_start, MPIDI_RMA_Op_t *,
                        sizeof(MPIDI_RMA_Op_t) * MPIR_CVAR_CH3_RMA_OP_WIN_POOL_SIZE, mpi_errno,
                        "RMA op pool");
    (*win_ptr)->op_pool_head = NULL;
    for (i = 0; i < MPIR_CVAR_CH3_RMA_OP_WIN_POOL_SIZE; i++) {
        (*win_ptr)->op_pool_start[i].pool_type = MPIDI_RMA_POOL_WIN;
        MPL_DL_APPEND((*win_ptr)->op_pool_head, &((*win_ptr)->op_pool_start[i]));
    }

    /* per-window free pool of target elements, capped by the communicator
     * size (no point in having more targets than ranks) */
    win_target_pool_size =
        MPL_MIN(MPIR_CVAR_CH3_RMA_TARGET_WIN_POOL_SIZE, MPIR_Comm_size(win_comm_ptr));
    MPIR_CHKPMEM_MALLOC((*win_ptr)->target_pool_start, MPIDI_RMA_Target_t *,
                        sizeof(MPIDI_RMA_Target_t) * win_target_pool_size, mpi_errno,
                        "RMA target pool");
    (*win_ptr)->target_pool_head = NULL;
    for (i = 0; i < win_target_pool_size; i++) {
        (*win_ptr)->target_pool_start[i].pool_type = MPIDI_RMA_POOL_WIN;
        MPL_DL_APPEND((*win_ptr)->target_pool_head, &((*win_ptr)->target_pool_start[i]));
    }

    /* slot table of per-target lists, also capped by the communicator size */
    (*win_ptr)->num_slots = MPL_MIN(MPIR_CVAR_CH3_RMA_SLOTS_SIZE, MPIR_Comm_size(win_comm_ptr));
    MPIR_CHKPMEM_MALLOC((*win_ptr)->slots, MPIDI_RMA_Slot_t *,
                        sizeof(MPIDI_RMA_Slot_t) * (*win_ptr)->num_slots, mpi_errno, "RMA slots");
    for (i = 0; i < (*win_ptr)->num_slots; i++) {
        (*win_ptr)->slots[i].target_list_head = NULL;
    }

    /* pool of entries for queuing lock requests arriving from other ranks */
    MPIR_CHKPMEM_MALLOC((*win_ptr)->target_lock_entry_pool_start,
                        MPIDI_RMA_Target_lock_entry_t *,
                        sizeof(MPIDI_RMA_Target_lock_entry_t) *
                        MPIR_CVAR_CH3_RMA_TARGET_LOCK_ENTRY_WIN_POOL_SIZE, mpi_errno,
                        "RMA lock entry pool");
    (*win_ptr)->target_lock_entry_pool_head = NULL;
    for (i = 0; i < MPIR_CVAR_CH3_RMA_TARGET_LOCK_ENTRY_WIN_POOL_SIZE; i++) {
        MPL_DL_APPEND((*win_ptr)->target_lock_entry_pool_head,
                      &((*win_ptr)->target_lock_entry_pool_start[i]));
    }

    if (MPIDI_RMA_Win_inactive_list_head == NULL && MPIDI_RMA_Win_active_list_head == NULL) {
        /* this is the first window, register RMA progress hook */
        mpi_errno = MPID_Progress_register_hook(MPIDI_CH3I_RMA_Make_progress_global,
                                                &MPIDI_CH3I_RMA_Progress_hook_id);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    /* new windows start out on the inactive list */
    MPL_DL_APPEND(MPIDI_RMA_Win_inactive_list_head, (*win_ptr));

    /* give the channel a chance to do its own window initialization */
    if (MPIDI_CH3U_Win_hooks.win_init != NULL) {
        mpi_errno =
            MPIDI_CH3U_Win_hooks.win_init(size, disp_unit, create_flavor, model, info, comm_ptr,
                                          win_ptr);
        if (mpi_errno != MPI_SUCCESS)
            MPIR_ERR_POP(mpi_errno);
    }

  fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_WIN_INIT);
    return mpi_errno;
  fn_fail:
    MPIR_CHKPMEM_REAP();
    goto fn_exit;
}
/* check_and_switch_target_state - Advance the synchronization state machine
 * for one RMA target and report whether the caller may issue its operations.
 *
 * win_ptr:          window the target belongs to
 * target:           target to examine (NULL is a no-op)
 * is_able_to_issue: [out] set to 1 unless the target is waiting for a lock
 *                   grant (state MPIDI_RMA_LOCK_ISSUED)
 * made_progress:    [out] set to 1 if this call issued a lock/flush/unlock/
 *                   decrement message or otherwise changed target state
 *
 * Returns MPI_SUCCESS or an MPI error code from the messaging helpers.
 */
static inline int check_and_switch_target_state(MPID_Win * win_ptr, MPIDI_RMA_Target_t * target,
                                                int *is_able_to_issue, int *made_progress)
{
    int rank = win_ptr->comm_ptr->rank;
    int mpi_errno = MPI_SUCCESS;

    (*made_progress) = 0;
    (*is_able_to_issue) = 0;

    if (target == NULL)
        goto fn_exit;

    /* When user event happens, move op in user pending list to network
     * pending list */
    if (target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH ||
        target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH_LOCAL ||
        target->sync.sync_flag == MPIDI_RMA_SYNC_UNLOCK || target->win_complete_flag) {
        MPIDI_RMA_Op_t *user_op = target->pending_user_ops_list_head;

        if (user_op != NULL) {
            /* first net-pending op for this target: bump the window count */
            if (target->pending_net_ops_list_head == NULL)
                win_ptr->num_targets_with_pending_net_ops++;

            MPL_DL_DELETE(target->pending_user_ops_list_head, user_op);
            MPL_DL_APPEND(target->pending_net_ops_list_head, user_op);

            if (target->next_op_to_issue == NULL)
                target->next_op_to_issue = user_op;
        }
    }

    switch (target->access_state) {
    case MPIDI_RMA_LOCK_CALLED:
        if (target->sync.sync_flag == MPIDI_RMA_SYNC_NONE ||
            target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH_LOCAL ||
            target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH) {
            /* only send a standalone lock request if no queued op could
             * piggyback the lock instead */
            if ((target->pending_net_ops_list_head == NULL ||
                 !target->pending_net_ops_list_head->piggyback_lock_candidate) &&
                (target->pending_user_ops_list_head == NULL ||
                 !target->pending_user_ops_list_head->piggyback_lock_candidate)) {
                /* issue lock request */
                target->access_state = MPIDI_RMA_LOCK_ISSUED;
                if (target->target_rank == rank) {
                    mpi_errno = acquire_local_lock(win_ptr, target->lock_type);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIR_ERR_POP(mpi_errno);
                }
                else {
                    mpi_errno = send_lock_msg(target->target_rank, target->lock_type, win_ptr);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIR_ERR_POP(mpi_errno);
                }
                (*made_progress) = 1;
            }
        }
        else if (target->sync.sync_flag == MPIDI_RMA_SYNC_UNLOCK) {
            if (target->pending_net_ops_list_head == NULL) {
                /* No RMA operation has ever been posted to this target,
                 * finish issuing, no need to acquire the lock.  Cleanup
                 * function will clean it up. */
                target->access_state = MPIDI_RMA_LOCK_GRANTED;

                /* We are done with ending synchronization, unset target's
                 * sync_flag. */
                target->sync.sync_flag = MPIDI_RMA_SYNC_NONE;
                (*made_progress) = 1;
            }
            else {
                /* if we reach WIN_UNLOCK and there is still operation existing
                 * in pending list, this operation must be the only operation
                 * and it is prepared to piggyback LOCK and UNLOCK. */
                MPIU_Assert(MPIR_CVAR_CH3_RMA_DELAY_ISSUING_FOR_PIGGYBACKING);
                MPIU_Assert(target->pending_net_ops_list_head->next == NULL);
                MPIU_Assert(target->pending_net_ops_list_head->piggyback_lock_candidate);
            }
        }
        break;

    case MPIDI_RMA_LOCK_GRANTED:
    case MPIDI_RMA_NONE:
        if (target->win_complete_flag) {
            if (target->pending_net_ops_list_head == NULL) {
                MPIDI_CH3_Pkt_flags_t flags = MPIDI_CH3_PKT_FLAG_NONE;
                /* piggyback a FLUSH request on the decrement message when a
                 * flush is pending and unacknowledged ops were issued */
                if (target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH &&
                    target->num_ops_flush_not_issued > 0) {
                    flags |= MPIDI_CH3_PKT_FLAG_RMA_FLUSH;
                    win_ptr->outstanding_acks++;
                    target->sync.outstanding_acks++;
                    target->num_ops_flush_not_issued = 0;
                }

                mpi_errno = send_decr_at_cnt_msg(target->target_rank, win_ptr, flags);
                if (mpi_errno != MPI_SUCCESS)
                    MPIR_ERR_POP(mpi_errno);

                /* We are done with ending synchronization, unset target's
                 * sync_flag. */
                target->sync.sync_flag = MPIDI_RMA_SYNC_NONE;
                (*made_progress) = 1;
            }
        }
        else if (target->sync.sync_flag == MPIDI_RMA_SYNC_FLUSH) {
            if (target->pending_net_ops_list_head == NULL) {
                /* a local target needs no flush message */
                if (target->target_rank != rank) {
                    if (target->num_ops_flush_not_issued > 0) {
                        win_ptr->outstanding_acks++;
                        target->sync.outstanding_acks++;
                        target->num_ops_flush_not_issued = 0;

                        mpi_errno = send_flush_msg(target->target_rank, win_ptr);
                        if (mpi_errno != MPI_SUCCESS)
                            MPIR_ERR_POP(mpi_errno);
                    }
                }

                /* We are done with ending synchronization, unset target's
                 * sync_flag. */
                target->sync.sync_flag = MPIDI_RMA_SYNC_NONE;
                (*made_progress) = 1;
            }
        }
        else if (target->sync.sync_flag == MPIDI_RMA_SYNC_UNLOCK) {
            if (target->pending_net_ops_list_head == NULL) {
                if (target->target_rank == rank) {
                    mpi_errno = MPIDI_CH3I_Release_lock(win_ptr);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIR_ERR_POP(mpi_errno);
                }
                else {
                    MPIDI_CH3_Pkt_flags_t flag = MPIDI_CH3_PKT_FLAG_NONE;
                    /* skip the ack round-trip when nothing needs flushing */
                    if (target->num_ops_flush_not_issued == 0) {
                        flag = MPIDI_CH3_PKT_FLAG_RMA_UNLOCK_NO_ACK;
                    }
                    else {
                        win_ptr->outstanding_acks++;
                        target->sync.outstanding_acks++;
                        target->num_ops_flush_not_issued = 0;
                    }
                    mpi_errno = send_unlock_msg(target->target_rank, win_ptr, flag);
                    if (mpi_errno != MPI_SUCCESS)
                        MPIR_ERR_POP(mpi_errno);
                }
                /* We are done with ending synchronization, unset target's
                 * sync_flag. */
                target->sync.sync_flag = MPIDI_RMA_SYNC_NONE;
                (*made_progress) = 1;
            }
        }
        break;

    default:
        break;
    }   /* end of switch */

    /* caller may issue ops unless we are still waiting for a lock grant */
    if (target->access_state != MPIDI_RMA_LOCK_ISSUED) {
        (*is_able_to_issue) = 1;
    }

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}