Example #1
/* wait for multiple requests to complete */
int ADIOI_PVFS2_aio_wait_fn(int count, void ** array_of_states, 
		double timeout, MPI_Status *status)
{

    ADIOI_AIO_Request **aio_reqlist;
    PVFS_sys_op_id *op_id_array;
    int i,j, greq_count, completed_count=0;
    int *error_array;

    aio_reqlist = (ADIOI_AIO_Request **)array_of_states;

    op_id_array = (PVFS_sys_op_id*)ADIOI_Calloc(count, sizeof(PVFS_sys_op_id));
    error_array = (int *)ADIOI_Calloc(count, sizeof(int));
    greq_count = count;


    /* PVFS-2.6: testsome actually tests all requests and fills in op_id_array
     * with the ones that have completed.  count is an in/out parameter.
     * returns with the number of completed operations.  what a mess! */
    while (completed_count < greq_count ) {
	count = greq_count;
	PVFS_sys_testsome(op_id_array, &count, NULL, error_array, INT_MAX);
	completed_count += count;
	for (i=0; i< count; i++) {
	    for (j=0; j<greq_count; j++) {
		if (op_id_array[i] == aio_reqlist[j]->op_id) {
		    aio_reqlist[j]->nbytes = 
			aio_reqlist[j]->resp_io.total_completed;
		    MPI_Grequest_complete(aio_reqlist[j]->req);
		}
	    }
	}
    }
    /* the scratch arrays are no longer needed once everything completed */
    ADIOI_Free(op_id_array);
    ADIOI_Free(error_array);
    return MPI_SUCCESS; /* TODO: no idea how to deal with errors */
}
Example #2
int MPI_File_iread_at(MPI_File mpi_fh, MPI_Offset offset, void *buf,
                      int count, MPI_Datatype datatype, 
                      MPIO_Request *request)
{
	int error_code;
	MPI_Status *status;

	MPID_CS_ENTER();
	MPIR_Nest_incr();

	status = (MPI_Status *) ADIOI_Malloc(sizeof(MPI_Status));

	/* for now, no threads or anything fancy. 
	 * just call the blocking version */
	error_code = MPI_File_read_at(mpi_fh, offset, buf, count, datatype,
				      status); 
	/* ROMIO-1 doesn't do anything with status.MPI_ERROR */
	status->MPI_ERROR = error_code;

	/* kick off the request */
	MPI_Grequest_start(MPIU_Greq_query_fn, MPIU_Greq_free_fn, 
			   MPIU_Greq_cancel_fn, status, request);
	/* but we did all the work already */
	MPI_Grequest_complete(*request);

	MPIR_Nest_decr();
	MPID_CS_EXIT();

	/* passed the buck to the blocking version...*/
	return MPI_SUCCESS;
}
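
The three MPIU_Greq_* callbacks passed to MPI_Grequest_start above are not shown in this example. For orientation, here is a minimal sketch of what such callbacks can look like, assuming extra_state points at the heap-allocated MPI_Status as above; these are illustrative stand-ins, not ROMIO's actual MPIU_Greq_* implementations. The signatures are the ones the MPI standard requires for generalized requests.

static int example_query_fn(void *extra_state, MPI_Status *status)
{
    /* report the status saved when the work was carried out eagerly */
    *status = *(MPI_Status *)extra_state;
    return status->MPI_ERROR;
}

static int example_free_fn(void *extra_state)
{
    /* release the MPI_Status allocated when the request was created */
    ADIOI_Free(extra_state);
    return MPI_SUCCESS;
}

static int example_cancel_fn(void *extra_state, int complete)
{
    /* the work already ran eagerly, so there is nothing left to cancel */
    return MPI_SUCCESS;
}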
Example #3
/* poll for completion of a single outstanding AIO request */
int ADIOI_NTFS_aio_poll_fn(void *extra_state, MPI_Status *status)
{
    ADIOI_AIO_Request *aio_req;
    int mpi_errno = MPI_SUCCESS;

    /* FIXME: Validate the args -- has it already been done by the
       caller? */

    aio_req = (ADIOI_AIO_Request *)extra_state;

    /* XXX: test for AIO completion here */
    if (!GetOverlappedResult(aio_req->fd, aio_req->lpOvl,
                             &(aio_req->nbytes), FALSE)) {
        if (GetLastError() == ERROR_IO_INCOMPLETE) {
            /* I/O still in progress */
            /* TODO: need to diddle with status somehow */
        } else {
            /* Error occurred */
            /* TODO: unsure how to handle this */
        }
    } else {
        mpi_errno = MPI_Grequest_complete(aio_req->req);
        if (mpi_errno != MPI_SUCCESS) {
            mpi_errno = MPIO_Err_create_code(MPI_SUCCESS,
                            MPIR_ERR_RECOVERABLE,
                            "ADIOI_NTFS_aio_poll_fn", __LINE__,
                            MPI_ERR_IO, "**mpi_grequest_complete",
                            0);
        }
    }
    return mpi_errno;
}
Example #4
/* generic POSIX aio completion test routine */
int ADIOI_GEN_aio_poll_fn(void *extra_state, MPI_Status *status)
{
    ADIOI_AIO_Request *aio_req;
    int errcode=MPI_SUCCESS;

    aio_req = (ADIOI_AIO_Request *)extra_state;

    /* aio_error returns an ERRNO value */
    errno = aio_error(aio_req->aiocbp);
    if (errno == EINPROGRESS) {
	    /* TODO: need to diddle with status somehow */
    }
    else if (errno == ECANCELED) {
	    /* TODO: unsure how to handle this */
    } else if (errno == 0) {
	    ssize_t n = aio_return(aio_req->aiocbp);
	    aio_req->nbytes = n;
	    errcode = MPI_Grequest_complete(aio_req->req);
	    /* --BEGIN ERROR HANDLING-- */
	    if (errcode != MPI_SUCCESS) {
		    errcode = MPIO_Err_create_code(MPI_SUCCESS,
				    MPIR_ERR_RECOVERABLE,
				    "ADIOI_GEN_aio_poll_fn", __LINE__,
				    MPI_ERR_IO, "**mpi_grequest_complete",
				    0);
	    }
	    /* --END ERROR HANDLING-- */
    }
    return errcode;
}
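
Poll functions like ADIOI_GEN_aio_poll_fn (and the wait functions in the other examples) do not plug into plain MPI_Grequest_start; ROMIO relies on MPICH's extended generalized requests for them. The sketch below shows roughly how such a class could be registered and used. MPIX_Grequest_class_create and MPIX_Grequest_class_allocate are MPICH-specific extensions, so their exact names and availability are assumptions that depend on the MPICH version.

MPIX_Grequest_class aio_greq_class;

void register_aio_greq_class(void)
{
    /* bind the query/free/cancel callbacks together with the poll and
     * wait functions into a reusable request class */
    MPIX_Grequest_class_create(MPIU_Greq_query_fn, MPIU_Greq_free_fn,
                               MPIU_Greq_cancel_fn,
                               ADIOI_GEN_aio_poll_fn, ADIOI_GEN_aio_wait_fn,
                               &aio_greq_class);
}

void start_aio_request(ADIOI_AIO_Request *aio_req, MPI_Request *request)
{
    /* create a request of that class; aio_req becomes the extra_state
     * handed back to the poll/wait functions */
    MPIX_Grequest_class_allocate(aio_greq_class, aio_req, request);
    aio_req->req = *request;  /* poll_fn completes it via MPI_Grequest_complete */
}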
Example #5
/* Wait for completion of one of the outstanding AIO requests */
int ADIOI_NTFS_aio_wait_fn(int count, void **array_of_states,
		double timeout, MPI_Status *status)
{
    int i, mpi_errno = MPI_SUCCESS;
    ADIOI_AIO_Request **aio_reqlist;
    LPHANDLE lpHandles;
    DWORD retObject = 0;

    /* FIXME: Validate the args -- has it already been done by the
       caller? */
    aio_reqlist = (ADIOI_AIO_Request **)array_of_states;
    lpHandles = (LPHANDLE) ADIOI_Calloc(count, sizeof(HANDLE));
    if (lpHandles == NULL)
    {
	mpi_errno = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
	    "ADIOI_NTFS_aio_wait_fn", __LINE__, MPI_ERR_IO,
	    "**nomem", "**nomem %s", "Event handles");
	return mpi_errno;
    }
    /* XXX: set up the array of outstanding request handles */
    for (i = 0; i < count; i++) {
        lpHandles[i] = (aio_reqlist[i])->lpOvl->hEvent;
    }

    /* XXX: wait for one request to complete */
    /* FIXME: Is the timeout in seconds? */
    timeout = (timeout <= 0) ? INFINITE : (timeout * 1000);

    retObject = WaitForMultipleObjects(count, lpHandles, FALSE, timeout);
    if (retObject == WAIT_TIMEOUT) {
        /* timed out before any request completed; WAIT_TIMEOUT must be
         * handled separately or it would be misused as an array index */
    } else if (retObject != WAIT_FAILED) {
        retObject = retObject - WAIT_OBJECT_0;
        if (GetOverlappedResult(aio_reqlist[retObject]->fd,
                aio_reqlist[retObject]->lpOvl,
                &(aio_reqlist[retObject]->nbytes), FALSE)) {
            /* XXX: mark the completed request as 'done' */
            mpi_errno = MPI_Grequest_complete(aio_reqlist[retObject]->req);
            if (mpi_errno != MPI_SUCCESS) {
                mpi_errno = MPIO_Err_create_code(MPI_SUCCESS,
                                MPIR_ERR_RECOVERABLE,
                                "ADIOI_NTFS_aio_wait_fn", __LINE__,
                                MPI_ERR_IO, "**mpi_grequest_complete",
                                0);
            }
        } else {
            if (GetLastError() == ERROR_IO_INCOMPLETE) {
                /* I/O still in progress */
                /* TODO: need to diddle with status somehow */
            } else {
                /* Error occurred */
                /* TODO: not sure how to handle this */
            }
        }
    } else {
        /* TODO: How to handle an error from WaitForMultipleObjects? */
    }
    ADIOI_Free(lpHandles);
    return mpi_errno;
}
Example #6
MTEST_THREAD_RETURN_TYPE do_work(void *arg)
{
  MPI_Request *req = (MPI_Request *)arg;
  IF_VERBOSE(("Starting work in thread ...\n"));
  MTestSleep(3);
  IF_VERBOSE(("Work in thread done !!!\n"));
  MPI_Grequest_complete(*req);
  return MTEST_THREAD_RETVAL_IGN;
}
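
The thread above only completes the request; the other half of the test is the driver that creates it and waits on it. Below is a minimal sketch of that side, with trivial stand-in callbacks; MTest_Start_thread and MTest_Join_threads are assumed to come from the same test harness as do_work.

static int trivial_query_fn(void *extra_state, MPI_Status *status)
{
    /* nothing was transferred; report an empty, successful status */
    MPI_Status_set_cancelled(status, 0);
    MPI_Status_set_elements(status, MPI_BYTE, 0);
    status->MPI_SOURCE = MPI_UNDEFINED;
    status->MPI_TAG = MPI_UNDEFINED;
    return MPI_SUCCESS;
}

static int trivial_free_fn(void *extra_state) { return MPI_SUCCESS; }

static int trivial_cancel_fn(void *extra_state, int complete) { return MPI_SUCCESS; }

void run_grequest_thread_test(void)
{
    MPI_Request request;
    MPI_Status status;

    /* create the generalized request and hand it to the worker thread */
    MPI_Grequest_start(trivial_query_fn, trivial_free_fn, trivial_cancel_fn,
                       NULL, &request);
    MTest_Start_thread(do_work, &request);

    /* blocks until do_work calls MPI_Grequest_complete() */
    MPI_Wait(&request, &status);
    MTest_Join_threads();
}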
Example #7
int ADIOI_PVFS2_aio_poll_fn(void *extra_state, MPI_Status *status)
{
    ADIOI_AIO_Request *aio_req;
    int ret, error;

    aio_req = (ADIOI_AIO_Request *)extra_state;

    /* BUG: cannot use PVFS_sys_testsome here: it does not work for a
     * specific request, so this poll function has to block in PVFS_sys_wait */
    ret = PVFS_sys_wait(aio_req->op_id, "ADIOI_PVFS2_aio_poll_fn", &error);
    if (ret == 0) {
	aio_req->nbytes = aio_req->resp_io.total_completed;
	MPI_Grequest_complete(aio_req->req);
	return MPI_SUCCESS;
    } else
	return MPI_UNDEFINED; /* TODO: what's this error? */
}
Example #8
void MPIO_Completed_request_create(MPI_File *fh, MPI_Offset bytes,
		int *error_code, MPI_Request *request)
{
	MPI_Status *status;
	status = (MPI_Status *)ADIOI_Malloc(sizeof(MPI_Status));

	status->MPI_ERROR = *error_code;
#ifdef HAVE_STATUS_SET_BYTES
	MPIR_Status_set_bytes(status, MPI_BYTE, bytes);
#endif
	/* --BEGIN ERROR HANDLING-- */
	if (*error_code != MPI_SUCCESS)
		*error_code = MPIO_Err_return_file(*fh, *error_code);
	/* --END ERROR HANDLING-- */
	MPI_Grequest_start(MPIU_Greq_query_fn, MPIU_Greq_free_fn, 
			MPIU_Greq_cancel_fn, status, request);
	MPI_Grequest_complete(*request);
}
Example #9
static void ADIOI_GEN_IreadStridedColl_fini(ADIOI_NBC_Request *nbc_req,
                                            int *error_code)
{
    ADIOI_GEN_IreadStridedColl_vars *vars = nbc_req->data.rd.rsc_vars;
    MPI_Count size;

    /* This is a temporary way of filling in status. The right way is to
       keep track of how much data was actually read and placed in buf
       during collective I/O. */
    MPI_Type_size_x(vars->datatype, &size);
    nbc_req->nbytes = size * vars->count;

    /* free the struct for parameters and variables */
    if (nbc_req->data.rd.rsc_vars) {
        ADIOI_Free(nbc_req->data.rd.rsc_vars);
        nbc_req->data.rd.rsc_vars = NULL;
    }

    /* make the request complete */
    *error_code = MPI_Grequest_complete(nbc_req->req);
    nbc_req->data.rd.state = ADIOI_IRC_STATE_COMPLETE;
}
Example #10
/*
 * ADIOI_Sync_thread_start - start the synchronisation routine
 */
void *ADIOI_Sync_thread_start(void *ptr) {
    ADIOI_Sync_thread_t t = (ADIOI_Sync_thread_t)ptr;
    ADIOI_Atomic_queue_t q = (ADIOI_Atomic_queue_t)t->sub_;
    ADIOI_Sync_req_t r;
    size_t wr_count;
    MPI_Count datatype_size;
    char *buf;
    ADIO_Offset bytes_xfered, len, buf_size, offset, off;
    int type, count, fflags, error_code;
    ADIO_Request *req;
    MPI_Datatype datatype;

    /* get sync buffer size */
    buf_size = t->fd_->hints->ind_wr_buffer_size;
    buf = (char *)ADIOI_Malloc(buf_size);

    for(;;) {
	/* get a new sync request */
#ifndef _USE_PTHREAD_MUTEX_
	if ((r = ADIOI_Atomic_queue_front(q)) == NULL)
	    continue;
#else
	r = ADIOI_Atomic_queue_front(q);
#endif
	/* pop sync request */
	ADIOI_Atomic_queue_pop(q);

	/* get request type */
	ADIOI_Sync_req_get_key(r, ADIOI_SYNC_TYPE, &type);

	/* check for shutdown type */
	if (type == ADIOI_THREAD_SHUTDOWN) {
	    break;
	}

	/* if sync type get all the fields */
	ADIOI_Sync_req_get_key(r, ADIOI_SYNC_ALL, &offset,
		&datatype, &count, &req, &error_code, &fflags);

	/* init I/O req */
	MPI_Type_size_x(datatype, &datatype_size);
	len = (ADIO_Offset)datatype_size * (ADIO_Offset)count;
	bytes_xfered = 0;
	off = offset;

	/* satisfy sync req */
	while (bytes_xfered < len) {
	    wr_count = (size_t)ADIOI_MIN(buf_size, len - bytes_xfered);
#ifdef ADIOI_MPE_LOGGING
	    MPE_Log_event(ADIOI_MPE_thread_read_a, 0, NULL);
#endif
	    /* read data from cache file */
	    pread(t->fd_->cache_fd->fd_sys, buf, wr_count, offset);
#ifdef ADIOI_MPE_LOGGING
	    MPE_Log_event(ADIOI_MPE_thread_read_b, 0, NULL);
	    MPE_Log_event(ADIOI_MPE_thread_write_a, 0, NULL);
#endif
	    /* write data to global file */
	    pwrite(t->fd_->fd_sys, buf, wr_count, offset);
#ifdef ADIOI_MPE_LOGGING
	    MPE_Log_event(ADIOI_MPE_thread_write_b, 0, NULL);
#endif
	    /* update offset */
	    bytes_xfered += (ADIO_Offset)wr_count;
	    offset += (ADIO_Offset)wr_count;
	}

	/* unlock extent locked in ADIO_WriteContig() */
	if (t->fd_->hints->e10_cache_coherent == ADIOI_HINT_ENABLE)
	    ADIOI_UNLOCK(t->fd_, off, SEEK_SET, len);

	/*  ---Begin Error Handling--- */
	/*  --- End Error Handling --- */

	/* complete Grequest */
	MPI_Grequest_complete(*req);
    }

    ADIOI_Free(buf);
    pthread_exit(NULL);
}
Example #11
void mpi_grequest_complete_f(MPI_Fint *request, MPI_Fint *ierr)
{
    MPI_Request c_req = MPI_Request_f2c(*request);

    *ierr = OMPI_INT_2_FINT(MPI_Grequest_complete(c_req));
}
Example #12
/* wait for multiple requests to complete */
int ADIOI_GEN_aio_wait_fn(int count, void ** array_of_states, 
		double timeout, MPI_Status *status)
{
	const struct aiocb **cblist;
	int err, errcode=MPI_SUCCESS;
	int nr_complete=0;
	double starttime;
	struct timespec aio_timer;
	struct timespec *aio_timer_p = NULL;

	ADIOI_AIO_Request **aio_reqlist;
	int i;

	aio_reqlist = (ADIOI_AIO_Request **)array_of_states;

	cblist = (const struct aiocb**) ADIOI_Calloc(count, sizeof(struct aiocb*));

	starttime = MPI_Wtime();
	if (timeout > 0) {
	    aio_timer.tv_sec = (time_t)timeout;
	    /* tv_nsec wants the fractional part in nanoseconds, not seconds */
	    aio_timer.tv_nsec = (long)((timeout - aio_timer.tv_sec) * 1e9);
	    aio_timer_p = &aio_timer;
	}
	for (i=0; i< count; i++)
	{
		cblist[i] = aio_reqlist[i]->aiocbp;
	}

	while(nr_complete < count) {
	    do {
		err = aio_suspend(cblist, count, aio_timer_p);
	    } while (err < 0 && errno == EINTR);
	    if (err == 0) 
	    { /* run through the list of requests, and mark all the completed
		 ones as done */
		for (i=0; i< count; i++)
		{
		    /* aio_error returns an ERRNO value */
		    if (aio_reqlist[i]->aiocbp == NULL) 
			continue;
		    errno = aio_error(aio_reqlist[i]->aiocbp);
		    if (errno == 0) {
			ssize_t n = aio_return(aio_reqlist[i]->aiocbp);
			aio_reqlist[i]->nbytes = n;
			errcode = MPI_Grequest_complete(aio_reqlist[i]->req);
			if (errcode != MPI_SUCCESS) {
			    errcode = MPIO_Err_create_code(MPI_SUCCESS,
				    MPIR_ERR_RECOVERABLE,
				    "ADIOI_GEN_aio_wait_fn", 
				    __LINE__, MPI_ERR_IO, 
				    "**mpi_grequest_complete", 0);
			}
			ADIOI_Free(aio_reqlist[i]->aiocbp);
			aio_reqlist[i]->aiocbp = NULL;
			cblist[i] = NULL;
			nr_complete++;
		    } 
		    /* TODO: need to handle error conditions somehow*/
		}
	    } /* TODO: also need to handle errors here  */
	    if ( (timeout > 0) && (timeout < (MPI_Wtime() - starttime) ))
		break;
	}

	if (cblist != NULL) ADIOI_Free(cblist);
	return errcode;
}