/*
 * armci_send_req: ship a request header (plus optional descriptor/data) to a
 * remote process as a LAPI active message.
 *
 * proc    - destination process rank (used here only for debug output; the
 *           actual target is msginfo->to).
 * msginfo - request header; its tag fields are filled in here so the remote
 *           header handler can locate the originator's buffer and counter.
 * len     - nominal message length from the caller; NOTE(review): this
 *           parameter is deliberately ignored — msglen is reconstructed
 *           locally from the header/descriptor/data lengths.
 *
 * Protocol notes grounded in this code:
 *  - When a descriptor (or descriptor+data) does not fit in the LAPI user
 *    header area (lapi_max_uhdr_data_sz), its length field is NEGATED in the
 *    header as an out-of-band signal to the remote side, and the origin
 *    counter pointer is set to NULL (the remote side will fetch the payload
 *    and bump the buffer counter itself).
 *  - pcmpl_cntr (completion counter) is tracked only for store-type ops;
 *    GET/LOCK/UNLOCK pass NULL.
 */
void armci_send_req(int proc, request_header_t* msginfo, int len)
{
    int msglen = sizeof(request_header_t);
    /* pcntr: origin counter embedded in the event buffer that holds msginfo */
    lapi_cntr_t *pcmpl_cntr, *pcntr = &(BUF_TO_EVBUF(msginfo)->cntr);
    int rc;

    /* Stash the counter address in the header so the remote handler can
     * signal this process when data arrives. */
    msginfo->tag.cntr= pcntr;
#if ARMCI_ENABLE_GPC_CALLS
    /* GPC vector GET with an embedded handler: reply buffer starts after the
     * descriptor region rather than directly after the header. */
    if(msginfo->operation==GET && msginfo->format==VECTOR && msginfo->ehlen){
        msginfo->tag.buf = (char *)(msginfo+1)+msginfo->dscrlen;
    }
    else
#endif
        msginfo->tag.buf = msginfo+1;

    if(msginfo->operation==GET || msginfo->operation==LOCK){
        SET_COUNTER(*(lapi_cmpl_t*)pcntr,1);/* data arrives in the same buf */
        /* The GPC case. Note that we don't use the parameter len. */
        if(msginfo->format==VECTOR && msginfo->ehlen > 0)
            msglen += msginfo->datalen;
        if(lapi_max_uhdr_data_sz < msginfo->dscrlen){
            /* Descriptor does not fit in the user header: negate dscrlen as
             * a flag; the remote side will GET the descriptor itself. */
            msginfo->dscrlen = -msginfo->dscrlen; /* no room for descriptor */
            pcntr = NULL;  /* GET(descr) from CH will increment buf cntr */
        }else msglen += msginfo->dscrlen;
        /* We should send the mutex, too. When op==LOCK, the value of the len
         * parameter is already sizeof(request_header_t)+sizeof(int); since we
         * don't use len but construct our own msglen, we need to add
         * sizeof(int) here. */
        if(msginfo->operation==LOCK) msglen += sizeof(int);
        pcmpl_cntr=NULL; /* don't trace completion status for load ops */
    }else if (msginfo->operation==UNLOCK){
        msglen += msginfo->dscrlen;
        pcmpl_cntr=NULL; /* don't trace completion status for unlock */
    }else{
        /* Store-type operations (PUT/ACC/...): descriptor AND data ride in
         * the user header when they fit; otherwise both lengths are negated
         * and the remote side pulls them. */
        if(lapi_max_uhdr_data_sz < (msginfo->datalen + msginfo->dscrlen)){
            msginfo->datalen = -msginfo->datalen;
            msginfo->dscrlen = -msginfo->dscrlen;
            pcntr = NULL; /* GET/LOCK from CH will increment buf cntr */
        }else msglen += msginfo->dscrlen+msginfo->datalen;
        /* trace completion of store ops */
        pcmpl_cntr = &cmpl_arr[msginfo->to].cntr;
    }

    /* Fence bookkeeping: remember an outstanding store to this target. */
    if(msginfo->operation==PUT || ARMCI_ACC(msginfo->operation))
        UPDATE_FENCE_STATE(msginfo->to, msginfo->operation, 1);

    /* Fire the active message; armci_header_handler runs on the target. */
    if((rc=LAPI_Amsend(lapi_handle,(uint)msginfo->to,
                       (void*)armci_header_handler, msginfo, msglen,
                       NULL, 0, NULL, pcntr, pcmpl_cntr)))
        armci_die("AM failed",rc);

    if(DEBUG_) fprintf(stderr,"%d sending req=%d to %d\n",
                       armci_me, msginfo->operation, proc);
}
/*
 * DDI_Send_request_comm: deliver a DDI_Patch data request to a data server,
 * using whichever transport this build was compiled with (sockets, LAPI,
 * Cray MPI, or plain MPI).
 *
 * buff   - the request, assumed to be a DDI_Patch (size is sizeof(DDI_Patch)).
 * to     - in: communicator-local data-server index; out: rewritten IN PLACE
 *          to the global data-server id (comm->global_dsid lookup).
 * req    - (CRAY_MPI only) if non-NULL, the send is posted non-blocking via
 *          MPI_Isend into this request; if NULL, a blocking MPI_Send is used.
 * commid - communicator id resolved through Comm_find().
 */
void DDI_Send_request_comm(void *buff,int *to,DDI_Request *req,int commid)
{
   char ack;                          /* 1-byte handshake reply (DDI_SOC path) */
   size_t size = sizeof(DDI_Patch);
   int i,np,me,nn,my;                 /* NOTE(review): only np is referenced, and
                                         only by the commented-out reverse lookup
                                         below; kept for token fidelity. */
   const DDI_Comm *comm = (const DDI_Comm *) Comm_find(commid);

   np = comm->np; me = comm->me;
   nn = comm->nn; my = comm->my;

   /* Translate the caller's local data-server index to a global id. */
   *to = comm->global_dsid[*to];

   DEBUG_OUT(LVL3,(stdout,"%s: sending request to global process %i.\n",DDI_Id(),*to))

 /* ------------------------------------------------------------ *\
    Using TCP/IP sockets, this is always a synchronous operation
 \* ------------------------------------------------------------ */
 # if defined DDI_SOC
   Send(gv(sockets)[*to],buff,size,0);
   /* Blocking 1-byte ack makes the socket path synchronous. */
   Recv(gv(sockets)[*to],&ack,1,0);
 # endif

 /* -------------------------------------------------------------- *\
    Using LAPI, this sends an active message to the target process
    causing an interrupt signal to be issued.  The target process
    now acts like a data server and handles the data request.  This
    call is non-blocking: once the active message is sent, the
    target process is in control of the data and the originating
    compute process only needs to wait until all the target
    processes have finished and tell 'this' originating process
    that it can continue.  These are slightly different for get,
    put and accumulates.
 \* -------------------------------------------------------------- */
 # if defined DDI_LAPI
   DDI_Patch *patch = (DDI_Patch *) buff;
   uint tgt = gv(lapi_map)[*to];
   void *hdr_hndlr = (void *) gv(am_hndlr)[tgt];
   void *udata = NULL;                /* no user payload beyond the header */
   ulong udata_len = 0;
   /* Origin counter comes from the patch itself; target/completion counters
    * are not tracked here (NULL). */
   lapi_cntr_t *org_cntr = (lapi_cntr_t *) patch->cp_lapi_cntr;
   lapi_cntr_t *tgt_cntr = NULL;
   lapi_cntr_t *cmpl_cntr = NULL;

   if(LAPI_Amsend(gv(lapi_hnd),tgt,hdr_hndlr,buff,size,udata,udata_len,
                  tgt_cntr,org_cntr,cmpl_cntr) != LAPI_SUCCESS) {
      fprintf(stdout,"%s: lapi_amsend error in ddi_send_request.\n",DDI_Id());
      Fatal_error(911);
   }
 # endif

 /* ---------------------------------- *\
    The stand-alone MPI version of DDI
 \* ---------------------------------- */
 # if defined CRAY_MPI
   /* Tag 37 marks data-server requests on this path — presumably matched by
    * the server's receive loop; confirm against the server code. */
   if(req == NULL) {
     MPI_Send(buff,size,MPI_BYTE,*to,37,comm->world_comm);
   } else {
     MPI_Isend(buff,size,MPI_BYTE,*to,37,comm->world_comm,req);
   }
   /* Early return: skips the trailing debug output below on this path. */
   return;
 # endif

 # if defined DDI_MPI && !defined DDI_SOC && !defined DDI_LAPI
   DEBUG_OUT(LVL3,(stdout,"%s: calling mpi_ssend.\n",DDI_Id()))
   /* Synchronous send: does not complete until the server has begun
    * receiving, mirroring the socket path's ack semantics. */
   MPI_Ssend(buff,size,MPI_BYTE,*to,0,comm->world_comm);
 # endif

 /* ------------------------------------------------------------- *\
    Reverse look up the group rank of the global rank to which
    the data request is being sent.
 \* ------------------------------------------------------------- */
 /*
   if(gv(scope) == DDI_GROUP) {
      for(i=0; i<np; i++) if(DDI_Id_global_proc(i+np) == *to) *to = i+np;
   }
 */

   DEBUG_OUT(LVL3,(stdout,"%s: leaving ddi_send_request.\n",DDI_Id()))
}