/* * Sortof like ftruncate, except won't make the * file shorter. */ static int fgrow(const int fd, const off_t len) { struct ffc_stat_s sb; struct ffsw sw; if (fffcntl(fd, FC_STAT, &sb, &sw) < 0) return errno; if (len < sb.st_size) return ENOERR; { const long dumb = 0; /* cache current position */ const off_t pos = ffseek(fd, 0, SEEK_CUR); if(pos < 0) return errno; if (ffseek(fd, len-sizeof(dumb), SEEK_SET) < 0) return errno; if(ffwrite(fd, (void *)&dumb, sizeof(dumb)) < 0) return errno; if (ffseek(fd, pos, SEEK_SET) < 0) return errno; } /* else */ return ENOERR; }
/* * Sortof like ftruncate, except won't make the file shorter. Differs * from fgrow by only writing one byte at designated seek position, if * needed. */ static int fgrow2(const int fd, const off_t len) { struct ffc_stat_s sb; struct ffsw sw; if (fffcntl(fd, FC_STAT, &sb, &sw) < 0) return errno; if (len <= sb.st_size) return ENOERR; { const char dumb = 0; /* we don't use ftruncate() due to problem with FAT32 file systems */ /* cache current position */ const off_t pos = ffseek(fd, 0, SEEK_CUR); if(pos < 0) return errno; if (ffseek(fd, len-1, SEEK_SET) < 0) return errno; if(ffwrite(fd, (void *)&dumb, sizeof(dumb)) < 0) return errno; if (ffseek(fd, pos, SEEK_SET) < 0) return errno; } return ENOERR; }
/*\ Check if asynchronous I/O operation completed. If yes, invalidate id.
 *
 *  req_id  - in/out: handle of the outstanding request; set to ELIO_DONE
 *            when the operation has completed.
 *  status  - out: ELIO_DONE if complete, ELIO_PENDING if still running.
 *
 *  Returns ELIO_OK on success, PROBFAIL if the underlying AIO poll
 *  reported an unexpected error.
\*/
int elio_probe(io_request_t *req_id, int* status)
{
  int  errval = -1;
  int  aio_i  = 0;
#ifdef PABLO
  int pablo_code = PABLO_elio_probe;
  PABLO_start( pablo_code );
#endif

  if(*req_id == ELIO_DONE){
      *status = ELIO_DONE;
  } else {

#ifdef AIO
#   if defined(CRAY)
#     if defined(FFIO)
      {
         struct ffsw dumstat, *prdstat = &(cb_fout[*req_id].stat);
         fffcntl(cb_fout[*req_id].filedes, FC_ASPOLL, prdstat, &dumstat);
         errval = (FFSTAT(*prdstat) == 0) ? INPROGRESS : 0;
      }
#     else
      errval = ( IO_DONE(cb_fout[*req_id].stat) == 0) ? INPROGRESS : 0;
#     endif
#   elif defined(AIX)
      errval = aio_error(cb_fout[(int)*req_id].aio_handle);
#   else
      errval = aio_error(cb_fout+(int)*req_id);
#   endif
#endif

      switch (errval) {
      case 0:
          /* Completed: locate the handle in the request table and free
           * the slot.  FIX: test the bound BEFORE indexing aio_req --
           * the original order read aio_req[MAX_AIO_REQ] (one past the
           * end) whenever the id was not found. */
          while(aio_i < MAX_AIO_REQ && aio_req[aio_i] != *req_id) aio_i++;
          if(aio_i >= MAX_AIO_REQ) ELIO_ERROR(HANDFAIL, aio_i);
          *req_id = ELIO_DONE;
          *status = ELIO_DONE;
          aio_req[aio_i] = NULL_AIO;
          break;
      case INPROGRESS:
          *status = ELIO_PENDING;
          break;
      default:
          return PROBFAIL;
      }
  }

#ifdef PABLO
  PABLO_end(pablo_code);
#endif
  return ELIO_OK;
}
/* ncio_ffio_sync_noffflush is only needed if the FFIO global layer is
 * used, because it currently has a bug that causes the PEs to hang
 * RKO 06/26/98
 *
 * Returns ENOERR on success, otherwise the ffsw error code from the
 * probe call.
 */
static int
ncio_ffio_sync_noffflush(ncio *const nciop)
{
	struct ffc_stat_s stat_buf;  /* for call to fffcntl() */
	struct ffsw       ff_status; /* to return ffsw.sw_error */

	/* Issue an innocuous ffio request purely to surface any pending
	 * error condition on the descriptor. */
	if (fffcntl(nciop->fd, FC_STAT, &stat_buf, &ff_status) < 0)
		return ff_status.sw_error;

	return ENOERR;
}
/* * What is the preferred I/O block size? * (This becomes the default *sizehint == ncp->chunk in the higher layers.) * TODO: What is the the best answer here? */ static size_t blksize(int fd) { struct ffc_stat_s sb; struct ffsw sw; if (fffcntl(fd, FC_STAT, &sb, &sw) > -1) { if(sb.st_oblksize > 0) return (size_t) sb.st_oblksize; } /* else, silent in the face of error */ return (size_t) 32768; }
/*\ Wait for asynchronous I/O operation to complete. Invalidate id.
 *
 *  req_id - in/out: handle of the outstanding request; set to ELIO_DONE
 *           once the operation has finished and its table slot is freed.
 *
 *  Returns ELIO_OK on success; raises ELIO_ERROR on suspend/recall or
 *  handle-lookup failure.
\*/
int elio_wait(io_request_t *req_id)
{
  int  aio_i = 0;
  int  rc;

  rc = 0; /* just to remove the compiler warning */
#ifdef PABLO
  int pablo_code = PABLO_elio_wait;
  PABLO_start( pablo_code );
#endif

  if(*req_id != ELIO_DONE ) {

# ifdef AIO
#   if defined(CRAY)
#     if defined(FFIO)
      {
         struct ffsw dumstat, *prdstat = &(cb_fout[*req_id].stat);
         fffcntl(cb_fout[*req_id].filedes, FC_RECALL, prdstat, &dumstat);
         if (FFSTAT(*prdstat) == FFERR) ELIO_ERROR(SUSPFAIL,0);
      }
#     else
      {
         struct iosw *statlist[1];
         statlist[0] = &(cb_fout[*req_id].stat);
         recall(cb_fout[*req_id].filedes, 1, statlist);
      }
#     endif
#   elif defined(AIX)
#     if !defined(AIX52) && !defined(_AIO_AIX_SOURCE)
      do {    /* I/O can be interrupted on SP through rcvncall ! */
           rc = (int)aio_suspend(1, cb_fout_arr+(int)*req_id);
      } while(rc == -1 && errno == EINTR);
#     endif
#   else
      if((int)aio_suspend((const struct aiocb *const*)(cb_fout_arr+(int)*req_id), 1, NULL) != 0) rc = -1;
#   endif
      if(rc == -1) ELIO_ERROR(SUSPFAIL,0);

#   if defined(DECOSF)
      /* on DEC aio_return is required to clean internal data structures */
      if(aio_return(cb_fout+(int)*req_id) == -1) ELIO_ERROR(RETUFAIL,0);
#   endif
#endif

      /* Locate the handle in the request table and free the slot.
       * FIX: test the bound BEFORE indexing aio_req -- the original
       * order read aio_req[MAX_AIO_REQ] (one past the end) whenever
       * the id was not found. */
      while(aio_i < MAX_AIO_REQ && aio_req[aio_i] != *req_id) aio_i++;
      if(aio_i >= MAX_AIO_REQ) ELIO_ERROR(HANDFAIL, aio_i);

      aio_req[aio_i] = NULL_AIO;
      *req_id = ELIO_DONE;
   }

#ifdef PABLO
   PABLO_end(pablo_code);
#endif
   return ELIO_OK;
}