/*
 * _sds_fr_mem - move data from user memory into SDS.
 *
 * The transfer is done with a single sswrite(); therefore the request
 * length must be a whole number of SDS blocks, the SDS target address
 * must be block-aligned, and the user buffer must be word-aligned.
 *
 * Returns 0 on normal return, or else -1 with the error code in errno.
 */
_sds_fr_mem(
    bitptr  sdsaddr,    /* SDS bit address where data will be received */
    bitptr  ubuf,       /* user buffer containing data */
    int     nbits       /* number of bits to move */
    )
{
    long    bitoff;     /* bit offset of sdsaddr from the SDS base */
    char    *cp;        /* ubuf viewed as a character pointer */
    long    *wp;        /* ubuf viewed as a word pointer */
    int     status;

    /* the request must be a multiple of the SDS block size */
    if ((nbits & (BITPBLOCK - 1)) != 0) {
        errno = FDC_ERR_GRAN;
        return -1;
    }

    /* the user buffer must start on a word boundary */
    cp = BPTR2CP(ubuf);
    wp = BPTR2WP(ubuf);
    if ((char *)wp != cp) {
        errno = FDC_ERR_SDSWB;
        return -1;
    }

    /* the SDS destination must also be block-aligned */
    bitoff = SUBT_BPTR(sdsaddr, WPTR2BP(0));
    if ((bitoff & (BITPBLOCK - 1)) != 0) {
        errno = FDC_ERR_GRAN;
        return -1;
    }

    status = sswrite(wp, BITS2BLOCKS(bitoff), BITS2BLOCKS(nbits));
    if (status == -1)
        errno = FDC_ERR_SDSIO;  /* map any sswrite failure to FDC error */
    return status;
}
/*
 * Synchronous write for the ER90 byte-stream tape layer.
 *
 * fio     - fdinfo block for this layer
 * bufptr  - bit pointer to the user's data (must be byte-aligned)
 * nbytes  - number of bytes to write
 * retstat - status return word
 * fulp    - full/partial mode flag (unused here)
 * ubc     - unused bit count (must be 0)
 *
 * Returns the number of bytes written, or ERR with retstat set.
 */
int
_er90b_write(struct fdinfo *fio, bitptr bufptr, int nbytes,
    struct ffsw *retstat, int fulp, int *ubc)
{
    int ret;
    int nbt = 0;            /* number of bytes transferred so far */
    int nbreq;              /* number of bytes requested this request */
    char *buf;
    ER90BYT *f;
    int zero = 0;
    struct ffsw dumstat;

    buf = BPTR2CP(bufptr);
    /* only byte-aligned data with no unused bits is supported */
    if ((BPBITOFF(bufptr) & 7) != 0 || *ubc != 0)
        ERETURN(retstat, FDC_ERR_UBC, 0);

    nbreq = nbytes;
    if (fio->rwflag == POSITIN) {
        /* wait for any outstanding tape positioning request */
        f = (ER90BYT *)fio->lyr_info;
        if (f->tpos) {
            ret = _tape_tpwait(f->fd, &(f->tpos));
            if (ret < 0)
                ERETURN(retstat, errno, 0);
        }
    }
    else if (fio->rwflag == READIN) {
        /* write after read requires position to zero */
        ret = _er90b_pos(fio, FP_RSEEK, &zero, 1, &dumstat);
        if (ret < 0) {
            *retstat = dumstat;
            return(ERR);
        }
    }

    if (nbreq > 0) {
again:
        /* It is not safe to reissue the write if it fails */
        /* with EINTR. Some data may have been transferred */
        ret= write(fio->realfd, buf, nbreq);
        if (ret < 0)
            ERETURN(retstat, errno, nbt);
        nbt += ret;
        /*
         * The assumption is made here that the system will never return
         * zero bytes on a non-zero request without an error!
         */
        if (nbt < nbytes) {
            /* partial transfer: advance past what was written and retry */
            buf += ret;
            nbreq -= ret;
            goto again;
        }
    }
    else if (nbytes < 0)
        ERETURN(retstat, FDC_ERR_REQ, 0);

    SETSTAT(retstat, FFCNT, nbt);
    fio->rwflag = WRITIN;
    return (nbt);
}
/*
 * System-call read layer.
 *
 * Reads up to nbytes bytes from the real file descriptor into the
 * (byte-aligned) user buffer.  A zero-byte result on a nonzero request
 * is reported as end-of-data; otherwise the byte count is returned in
 * the status word.
 */
ssize_t
_sys_read(
    struct fdinfo   *fio,
    bitptr          bufptr,
    size_t          nbytes,
    struct ffsw     *retstat,
    int             fulp,
    int             *ubc)
{
    ssize_t         nread = 0;
    char            *cbuf;
    struct sys_f    *info;

    cbuf = BPTR2CP(bufptr);
    info = (struct sys_f *)fio->lyr_info;

    /* byte-aligned requests only; unused bit counts are not supported */
    if ((BPBITOFF(bufptr) & 7) != 0 || *ubc != 0)
        ERETURN(retstat, FDC_ERR_UBC, 0);

#ifdef __mips
    /*
     * A preceding asynchronous request leaves the kernel file position
     * stale.  Seek back to the position this layer is tracking instead
     * of using pread, so the system file offset stays in step with what
     * the syscall layer believes — the most 'expected' behavior here.
     */
    if (info->needpos) {
        if (lseek(fio->realfd, info->curpos, 0) < 0)
            ERETURN(retstat, errno, 0);
        info->needpos = 0;
    }
#endif

    if (nbytes > 0) {
        if (info->nointrio)
            nread = read(fio->realfd, cbuf, nbytes);
        else {
            /* retry the read if it is interrupted by a signal */
            LOOP_SYSCALL(nread, read(fio->realfd, cbuf, nbytes));
        }
        if (nread < 0)
            ERETURN(retstat, errno, 0);
    }

    if (nread == 0 && nbytes > 0) {
        /* zero bytes on a nonzero request means end-of-data */
        SETSTAT(retstat, FFEOD, nread);
    }
    else {
        SETSTAT(retstat, FFCNT, nread);
#ifdef __mips
        info->curpos += nread;      /* keep our tracked position current */
#endif
    }
    return (nread);
}
/*
 * Asynchronous write for the ER90 byte-stream tape layer.
 *
 * Starts a writea() on the real file descriptor; the caller's status
 * word doubles as the iosw for the asynchronous request.  A zero-byte
 * request is completed immediately with an FFCNT status.
 *
 * Returns the writea() result (0 on successful start), or ERR with
 * retstat set on failure.
 */
int
_er90b_writea(struct fdinfo *fio, bitptr bufptr, int nbytes,
    struct ffsw *retstat, int fulp, int *ubc)
{
    int         status = 0;
    int         origin = 0;         /* seek target for write-after-read */
    char        *ubuf;
    ER90BYT     *tinfo;
    struct ffsw locstat;

    ubuf = BPTR2CP(bufptr);

    /* only byte-aligned data with no unused bits is supported */
    if ((BPBITOFF(bufptr) & 7) != 0 || *ubc != 0)
        ERETURN(retstat, FDC_ERR_UBC, 0);

    if (fio->rwflag == POSITIN) {
        /* wait for any outstanding tape positioning request */
        tinfo = (ER90BYT *)fio->lyr_info;
        if (tinfo->tpos) {
            status = _tape_tpwait(tinfo->fd, &(tinfo->tpos));
            if (status < 0)
                ERETURN(retstat, errno, 0);
        }
    }
    else if (fio->rwflag == READIN) {
        /* write after read requires position to zero */
        status = _er90b_pos(fio, FP_RSEEK, &origin, 1, &locstat);
        if (status < 0) {
            *retstat = locstat;
            return(ERR);
        }
    }

    if (nbytes > 0) {
        CLRFFSTAT(*retstat);        /* flag async i/o in progress */
        status = writea(fio->realfd, ubuf, nbytes,
            (struct iosw *)retstat, 0);
        if (status < 0)
            ERETURN(retstat, errno, 0);
    }
    else if (nbytes < 0) {
        ERETURN(retstat, FDC_ERR_REQ, 0);
    }
    else {
        /* nbytes == 0: nothing to start, report completion at once */
        retstat->sw_flag = 1;
        FFSTAT(*retstat) = FFCNT;   /* I/O is done, and other stat */
                                    /* fields are already set. */
    }
    fio->rwflag = WRITIN;
    return (status);
}
/*
 * Write an EOD to a cache file.
 *
 * Truncate this layer at the current position.  Cache page buffers
 * that lie entirely past the new EOD are freed; a page that straddles
 * the EOD has its tail zeroed.  The underlying layer is truncated at
 * the same location so later extension leaves zeros, not stale data.
 */
int
_cch_weod(
struct fdinfo   *fio,
struct ffsw     *stat)
{
    int nbu, i;
    off_t fsize;                /* new logical file size (bits) */
    int bs;                     /* cache page (buffer) size in bits */
    struct cch_buf *cbufs;
    struct cch_f *cch_info;
    struct fdinfo *llfio;

    cch_info = (struct cch_f *)fio->lyr_info;

    /* EOD cannot be written on a read-only or block-special file */
    if ((fio->opn_flags & O_ACCMODE) == O_RDONLY)
        ERETURN(stat, EBADF, 0);
    if (cch_info->is_blkspec)
        ERETURN(stat, FDC_ERR_NOSUP, 0);

    /* truncate at the current position and update layer state */
    cch_info->fsize = cch_info->cpos;
    fio->rwflag = WRITIN;
    fio->ateof = 0;
    fio->ateod = 1;
    fio->recbits = 0;

    /*
     * Fix up any cache page buffers for file pages which lie past the new EOD.
     */
    nbu = cch_info->nbufs;
    bs = cch_info->bsize;
    cbufs = cch_info->bufs;
    fsize = cch_info->fsize;
    for (i=0; i<nbu; i++) {
        off_t filead = cbufs[i].filead;
        if (filead >= 0) {      /* buffer is assigned to a file page */
            /* If page is past EOD then mark it free */
            if (filead >= fsize)
                cbufs[i].filead = -1;
            /* If page straddles EOD then zero out part of it */
            else if (filead < fsize && filead + bs > fsize) {
                int valid_bytes = BITS2BYTES(fsize - filead);
#ifdef CCH_SDS_SUPPORTED
                if (cch_info->optflags & CCHOPT_SDS) {
                    int sds_offset;
                    int res;
                    /* buffer lives in SDS; zero via _sdsset */
                    sds_offset = BPTR2CP(cbufs[i].buf) - (char *)NULL;
                    res = _sdsset( sds_offset + valid_bytes, 0,
                        BITS2BYTES(bs) - valid_bytes);
                    if (res == -1)
                        ERETURN(stat, errno, 0);
                }
                else
#endif
                {
                    (void)memset( BPTR2CP(cbufs[i].buf) + valid_bytes, 0,
                        BITS2BYTES(bs) - valid_bytes);
                }
            }
        }
    }

    /*
     * Truncate the underlying layer at the same location. For most layers,
     * this ensures that data past this EOD becomes zero if the underlying file
     * is later extended such that a hole is left between the this EOD
     * and the data written later.
     */
    llfio = fio->fioptr;
    if (fsize < cch_info->feof) {
        if (XRCALL(llfio,seekrtn) llfio, BITS2BYTES(fsize),
            SEEK_SET, stat) == ERR)
            return(ERR);
        if (XRCALL(llfio,weodrtn) llfio, stat) == ERR)
            return(ERR);
        cch_info->feof = fsize;
    }
    SETSTAT(stat, FFEOD, 0);
    return(0);
}
/*
 * Open routine for the CDC 60-bit blocking layer.
 *
 * Sets record/block size limits from the requested record format,
 * allocates the working buffer and private info area, then opens the
 * lower layers.  For SI and I block types the lower layer must supply
 * record boundaries.
 *
 * Returns the fdinfo index of the lower layer, or ERR with stat set.
 */
int
_cdc_open(
const char *name,
int flags,
int mode,
struct fdinfo *fio,
union spec_u *spec,
struct ffsw *stat,
long cbits,
int cblks,
struct gl_o_inf *oinf)
{
    int nextfio = 0;
    int ll_blocked;             /* nonzero if lower layer has records */
    char *ptr;
    union spec_u *nspec;
    int recsize, blksize;
    struct ffsw *dumstat;
    struct cdc_f *cdc_info;

    recsize = 0;                /* this is ignored */
    /*
     * Blocksize is 512 60 bit words, or 5120 6-bit characters
     */
    blksize = 5120*6;           /* other block sizes not allowed */
    /*
     * Internally, both blksize and recsize are in bits!
     */
    switch(spec->fld.recfmt) {
    case TR_CDC_CZ:
        fio->maxrecsize = recsize;
        break;
    case TR_CDC_CS:
    case TR_CDC_IW:
    case TR_CDC_CW:
        fio->maxrecsize = -1;       /* no fixed record size limit */
        break;
    }
    fio->maxblksize = blksize;
    /*
     * Allocate buffer:
     * block size plus possible 48 bit block terminator plus one 60-bit word
     * plus 16 slop bytes.
     */
    fio->_ffbufsiz = blksize + 48 + 64 + 64 + 7; /* buffer size in bits + fudge */
    ptr = malloc((fio->_ffbufsiz >> 3) + 16);    /* bits -> bytes, plus slop */
    if (ptr == NULL) goto nomem;
    /*
     * Allocate private storage area
     */
    cdc_info = (struct cdc_f *)calloc(sizeof(struct cdc_f), 1);
    if (cdc_info == NULL) goto nomem;
    fio->lyr_info = (char *)cdc_info;

    SET_BPTR(fio->_base, CPTR2BP(ptr));
    fio->rwflag = POSITIN;
    fio->segbits = 0;
    fio->_cnt = 0;
    fio->_ptr = fio->_base;
    /*
     * Now, open the lower layers...
     */
    nspec = spec;
    NEXT_SPEC(nspec);
    nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks,
        NULL, oinf);
    if (nextfio < 0) goto badret;

    fio->fioptr = (struct fdinfo *)nextfio;
    /* ask the lower layer whether it supplies record boundaries */
    XRCALL(fio->fioptr, fcntlrtn) fio->fioptr, FC_GETINFO,
        &cdc_info->ffci, &dumstat);
    ll_blocked = cdc_info->ffci.ffc_flags & FFC_REC;
    switch(fio->subtype) {
    case TR_CDC_BT_DISK:
        break;                  /* either record or stream is OK */
    case TR_CDC_BT_SI:
    case TR_CDC_BT_I:
        if (ll_blocked == 0)    /* if not blocked */
        {
            _SETERROR(stat, FDC_ERR_NOBDRY, 0);
            goto badret;
        }
        break;
    }
    DUMP_IOB(fio);              /* debugging only */
    return(nextfio);

nomem:
    _SETERROR(stat, FDC_ERR_NOMEM, 0);
    /* fall through to common cleanup */
badret:
    if (nextfio > 0)
        XRCALL(fio->fioptr, closertn) fio->fioptr, &dumstat);
    if (BPTR2CP(fio->_base) != NULL)
        free(BPTR2CP(fio->_base));
    if (fio->lyr_info != NULL)
        free(fio->lyr_info);
    return(ERR);
}
/*
 * Assign a run of cache page buffers to a run of file pages.
 *
 * Chooses the least-recently-used group of buffers, flushes any dirty
 * victims (coalescing adjacent dirty pages into one write), waits for
 * outstanding I/O on them, and optionally reads the file pages in,
 * zeroing any portion past EOF.
 *
 * NOTE(review): the visible text of this function is truncated here;
 * the trailing statements (and final return) lie outside this chunk.
 */
struct cch_buf *
_cch_getblk(
struct cch_f *cch_info,     /* cch_f structure for the file */
struct fdinfo *llfio,       /* ffio file descriptor for underlying layer */
off_t fileaddr,             /* bit offset within the file of the buffer.
                             * This number must be a multiple of the buffer
                             * size. */
int64 *nblkp,               /* on input, the number of contiguous buffer
                             * blocks sought.  On output, assigned the
                             * actual number of contiguous buffer blocks
                             * assigned. */
int rd,                     /* 0 if all of the new blocks may be
                             * assigned without reading the file page.
                             * != 0 if the pages must be read. */
int valid,                  /* 0 if the CCH_VALIDBUFFER bit should */
                            /* not be set in the new blocks */
struct ffsw *stat           /* pointer to status return word */
)
{
    int i, nbu, ret;
    int bs;                 /* page size in bits */
    int lru_id;             /* buffer number of least recently */
                            /* used buffer. */
    int limit;
    int64 nblk;
    off_t endaddr, firstpaddr, faddr;
    long *wptr;
    long lru_tm;
    struct cch_buf *cubuf;
    struct cch_buf *cbufs;
    struct cch_buf *fb;

    CCH_DEBUG(("_cch_getblk EN: to bit offset %d\n",fileaddr));
    nbu = cch_info->nbufs;
    cbufs = cch_info->bufs;
    bs = cch_info->bsize;
    nblk = *nblkp;
    if (nblk > 1) {
        /*
         * Find the first page in the consecutive list of pages which
         * is buffer-resident, and clip the request so it stops just
         * before that page.
         */
        endaddr = fileaddr + nblk * bs;
        firstpaddr = endaddr;
        for (i=0; i<nbu; i++) {
            off_t x;
            cubuf = &cbufs[i];
            x = cubuf->filead;
            if (fileaddr <= x && x < firstpaddr)
                firstpaddr = x;
        }
        if (firstpaddr < endaddr)   /* a page is buffer resident */
            nblk = *nblkp = (firstpaddr - fileaddr) / bs;
        if (nblk <= 0)
            return((struct cch_buf *)NULL); /* shouldn't happen ! */
    }
    /*
     * Find the least-recently accessed sequence of *nblkp contiguous buffers.
     * Free buffers are counted as if their last access time was 0.
     * Search the buffers in groups of size nblk to speed this search and
     * reduce fragmentation of the cache.  When nblk>1, this algorithm
     * approximates LRU and, most importantly, is deterministic.
     */
    lru_tm = MAXLONG;       /* min _rtc() value in upcoming loop */
    lru_id = 0;
    for (i=0; i<(nbu-nblk+1); i+=nblk) {
        long last_access = 0;   /* free pages have last_access == 0 */
        if (cbufs[i].filead >= 0)
            last_access = cbufs[i].atime;
        if (last_access < lru_tm) {
            lru_tm = last_access;
            lru_id = i;
        }
    }
    /*
     * Use the least recently used page buffer or group of page buffers.
     * Flush any of these page buffers which have the dirty bit set.  When
     * several adjacent buffers are dirty and correspond to adjacent pages
     * in the file, they can be flushed with one request.
     */
    fb = &cbufs[lru_id];
    for (i=0; i<nblk; i++) {
        int contig = 0;     /* number of contiguous dirty buffers */
        faddr = fb[i].filead;
        if (faddr == -1)
            continue;       /* buffer is free */
        /* count adjacent dirty buffers mapping adjacent file pages */
        while (i+contig < nblk && (fb[i+contig].flags & CCH_DIRTY) &&
            fb[i+contig].filead == faddr) {
            if (fb[i+contig].lastdata || fb[i+contig].firstdata) {
                /* partial-page buffers must be flushed singly */
                if (contig == 0)
                    contig = 1;
                break;
            }
            contig++;
            faddr += bs;
        }
        if (contig > 0) {
            if (faddr > cch_info->fsize) {
                /* eof is in the last buffer */
                /* clear it if necessary */
                if ((fb[i+contig-1].flags & CCH_ZEROED) == 0){
                    bitptr toptr;
                    off_t eofaddr;
                    int pgoff;
                    eofaddr = CCHFLOOR(cch_info->fsize, bs);
                    pgoff = cch_info->fsize - eofaddr;
                    SET_BPTR(toptr,
                        INC_BPTR(fb[i+contig-1].buf, pgoff));
                    CCH_MEMCLEAR(toptr,(bs - pgoff));
                    fb[i+contig-1].flags |= CCH_ZEROED;
                }
            }
            ret = _cch_wrabuf(cch_info, llfio, &fb[i],
                BITS2BYTES(bs), BITS2BYTES(fb[i].filead),
                contig, &cch_info->feof,
#if defined(__mips) || defined(_LITTLE_ENDIAN)
                's',    /* flush synchronously */
#else
                'a',    /* flush asynchronously */
#endif
                stat);
            if (ret == ERR)
                return((struct cch_buf *)NULL);
            i += contig - 1;    /* skip the buffers just flushed */
        }
    }
    /*
     * Wait for any active page buffer I/O, and then requisition the buffers
     * for the appropriate file pages.
     */
    for (i=0; i<nblk; i++) {
        if (fb[i].flags & (CCH_WRITING | CCH_READING)) {
            CCHWAITIO(llfio,&fb[i],stat,ret);
            if (ret == ERR)
                return((struct cch_buf *)NULL);
        }
        fb[i].filead = fileaddr + i * bs;
        fb[i].flags = CCH_VALID;
        fb[i].firstdata = fb[i].lastdata = 0;
        if (valid)
            fb[i].flags |= CCH_VALIDBUFFER;
    }
    /*
     * Now start the synchronous reading of the file page into the buffer.  If
     * all of the pages lie beyond the EOF, then suppress the read.
     */
    if (rd) {
        if (fileaddr < cch_info->feof) {
            int by_tran;    /* bytes actually transferred by the read */
            fb->sw.sw_flag = 0;     /* indicate I/O in progress */
            ret = _cch_rdabuf(cch_info, llfio, fb, BITS2BYTES(bs),
                BITS2BYTES(fb->filead), nblk, 's',stat);
            if (ret == ERR)
                return((struct cch_buf *)NULL);
            /*
             * Zero portions of the buffers past the end of file.
             */
            by_tran = fb->sw.sw_count;
#ifdef CCH_SDS_SUPPORTED
            if (cch_info->optflags & CCHOPT_SDS) {
                int ret;
                ret = _sdsset( (BPTR2CP(fb->buf) - (char*)NULL) + by_tran,
                    0, nblk * BITS2BYTES(bs) - by_tran);
                if (ret == ERR) {
                    _SETERROR(stat, errno, 0);
                    return((struct cch_buf *)NULL);
                }
            }
            else
#endif
            {
                if ((nblk*BITS2BYTES(bs)-by_tran) != 0)
                    (void)memset(BPTR2CP( fb->buf) + by_tran, 0,
                        nblk * BITS2BYTES(bs) - by_tran);
            }
            for (i=0; i<nblk; i++) {
                fb[i].flags |= CCH_ZEROED;
            }
        }
        else {      /* page lies beyond EOF */
            /*
             * Zero the entire buffer.
             */
#ifdef CCH_SDS_SUPPORTED
            if (cch_info->optflags & CCHOPT_SDS) {
                int ret;
                ret = _sdsset( (BPTR2CP(fb->buf) - (char*)NULL), 0,
                    nblk * BITS2BYTES(bs));
                if (ret == ERR) {
                    _SETERROR(stat, errno, 0);
                    return((struct cch_buf *)NULL);
                }
                for (i=0; i<nblk; i++) {
                    fb[i].flags |= CCH_ZEROED;
                }
            }
            else
#endif
            if (fileaddr < cch_info->fsize){
                /* this block is between cch_info->feof and */
                /* cch_info->fsize, so we must zero it */
                /* Logic in other parts of this layer will */
                /* only zero what is beyond cch_info->fsize */
#ifdef _CRAY1
                wptr = BPTR2WP(fb->buf);
                limit = (nblk * bs) >> 6;   /* convert to words */
                /* this loop vectorizes! */
                for (i=0; i<limit; i++)
                    wptr[i] = 0;
#else
                memset(BPTR2CP(fb->buf), 0, (nblk * BITS2BYTES(bs)));
#endif
                for (i=0; i<nblk; i++) {
                    fb[i].flags |= CCH_ZEROED;
                }
            }
        }
    }
/*
 * Read a tape file.
 * With this format, one tape block == one record.
 * Parameters:
 *  fio     Pointer to fdinfo block
 *  bufptr  bit pointer to where data is to go.
 *  nbytes  Number of bytes to be read
 *  stat    pointer to status return word
 *  fulp    full or partial read mode flag
 *  ubc     pointer to unused bit count (not used this class of file)
 *
 * Returns the number of bytes delivered (status word distinguishes
 * FFEOR/FFCNT/FFEOF/FFEOD), or ERR with the status word set.
 */
ssize_t
_tmf_read(
struct fdinfo   *fio,
bitptr          bufptr,
size_t          nbytes,
struct ffsw     *stat,
int             fulp,
int             *ubc)
{
    register int    errn;       /* Error code */
    size_t          bytes;      /* Number of bytes actually read */
    ssize_t         ret;        /* Return status */
    struct tmfio    *xf_info;

    xf_info = (struct tmfio *)fio->lyr_info;

    /* unused bit counts are not supported by this layer */
    if (*ubc != 0) {
        errn = FDC_ERR_UBC;
        goto eret;
    }
    /* data must be byte-aligned */
    if ((BPBITOFF(bufptr) & 07) != 0) {
        errn = FDC_ERR_REQ;
        goto eret;
    }
    if (xf_info->rwflag == WRITIN && !xf_info->tmf_speov) {
        /* Read after write is OK in special processing */
        /* otherwise, it is an error. */
        errn = FDC_ERR_RAWR;
        goto eret;
    }
    if (xf_info->tmf_tpos) {
        /* wait for any outstanding tape positioning request */
        if (_tmf_tpwait(xf_info) < 0) {
            errn = errno;
            goto eret;
        }
    }
    xf_info->rwflag = READIN;
    if (xf_info->tmf_speov) {
        /* If we ever do async i/o, then we want to read */
        /* synchronously while in special processing. */
        if (xf_info->spblocks == 0) {
            /* We're not reading from the tape. */
            /* We're reading from buffer memory */
            goto spprocread;
        }
    }
    /*
     * If the number of bytes to read is zero and reading in full
     * record mode, skip to the end of record.  If in partial
     * record mode, the position remains as is.
     * Are we at the beginning of the record?
     */
    if (xf_info->tmf_cnt == 0) {
        /*
         * We are at the beginning of the record.  Must read
         * from tape.  Even if the user has requested a FULL
         * record, we need to read it in to the buffer, unless
         * the request is for bufsiz bytes.
         * That is because, if the user requests less than
         * is actually in the tape block, the read would
         * give an error.
         */
        if (nbytes == xf_info->tmf_bufsiz) {
            /* request is exactly one max-size block: read directly */
            LOOP_SYSCALL(ret,read(xf_info->tmf_fd, BPTR2CP(bufptr),
                xf_info->tmf_bufsiz));
            bytes = ret;
        }
        else {
            /* read into the library buffer, then copy what fits */
            LOOP_SYSCALL(ret,read(xf_info->tmf_fd, xf_info->tmf_bufptr,
                xf_info->tmf_bufsiz));
            if (ret > 0) {
                bytes = MIN(ret, nbytes);
                (void) memcpy(BPTR2CP(bufptr), xf_info->tmf_bufptr,
                    bytes);
            }
        }
        if (ret > 0) {
            if (ret == nbytes) {
                /* the block was consumed exactly: end of record */
                SETSTAT(stat, FFEOR, bytes);
                if (xf_info->spblocks > 0)
                    xf_info->spblocks--;
                return(bytes);
            }
            else if (fulp == FULL) {
                /* rest of the block is discarded in FULL mode */
                SETSTAT(stat, FFCNT, bytes);
                if (xf_info->spblocks > 0)
                    xf_info->spblocks--;
                return(bytes);
            }
            else {
                /* PARTIAL mode: keep the remainder buffered */
                SETSTAT(stat, FFCNT, bytes);
                xf_info->tmf_bufptr += bytes;
                xf_info->tmf_cnt = ret - bytes;
            }
        }
        else if (ret == 0) {
            /* zero-length read: tape mark or end of data */
            if (ioctl(xf_info->tmf_fd,TMFC_EOD,0)) {
                SETSTAT(stat, FFEOF, 0);
                return(0);
            }
            SETSTAT(stat, FFEOD, 0);
            return(0);
        }
        else {
            /* Could be EOV, or an error */
            if (errno == ENOSPC) {
                /* If we hit physical eov, */
                /* return an error */
                /* and set eovhit so that */
                /* checktp will tell us we */
                /* hit eov */
                /* JAS - should document that ffread will return ENOSPC at eov */
                /* also Fortran read will return an error */
                if (xf_info->tmf_eovon)
                    xf_info->tmf_eovhit = 1;
                ERETURN(stat, errno, 0);
            }
            else {
                ERETURN(stat, errno, 0);
            }
        }
    }
    else {
        /*
         * We are in the middle of a record.  The entire record,
         * and nothing more, is already in our buffer.
         */
        bytes = MIN(nbytes, xf_info->tmf_cnt);
        (void) memcpy(BPTR2CP(bufptr), xf_info->tmf_bufptr, bytes);
        xf_info->tmf_cnt -= bytes;
        xf_info->tmf_bufptr += bytes;
        if (xf_info->tmf_cnt == 0) {
            /* record exhausted: reset buffer to its base */
            xf_info->tmf_bufptr = xf_info->tmf_base;
            if (xf_info->spblocks > 0)
                xf_info->spblocks--;
            SETSTAT(stat, FFEOR, bytes);
        }
        else {
            SETSTAT(stat, FFCNT, bytes);
            if (fulp == FULL) {
                /* FULL mode discards the rest of the record */
                if (xf_info->spblocks > 0)
                    xf_info->spblocks--;
                xf_info->tmf_bufptr = xf_info->tmf_base;
                xf_info->tmf_cnt = 0;
            }
        }
    }
    return (bytes);

eret:
    ERETURN(stat, errn, 0);

spprocread:
    /* We're reading from buffer memory. */
    /* Right now, we've got only 1 block in buffer memory. */
    if (xf_info->tmf_tpmk) {
        /* a buffered tape mark: report EOF once */
        xf_info->tmf_tpmk = 0;
        SETSTAT(stat, FFEOF, 0);
        return(0);
    }
    else {
        if (xf_info->eovbytes == 0) {
            SETSTAT(stat, FFEOD, 0);
            return(0);
        }
        bytes = MIN(xf_info->eovbytes, nbytes);
        (void) memcpy(BPTR2CP(bufptr), xf_info->eovbuf, bytes);
        if (fulp == FULL || (nbytes >= xf_info->eovbytes)) {
            /* buffered block fully consumed (or discarded) */
            xf_info->eovbytes = 0;
            SETSTAT(stat, FFEOR, bytes);
        }
        else {
            xf_info->eovbytes -= nbytes;
            xf_info->eovbuf += nbytes;
            SETSTAT(stat, FFCNT, bytes);
        }
        return(bytes);
    }
}
/*
 * _cch_wrabuf
 *
 * Flushes to the underlying ffio layer one or more cache page buffers.
 * The ffsw structure pointed to by stat receives the synchronous or
 * asynchronous completion status.
 * If the lastdata or firstdata fields in the buffer control block
 * are non-zero, then nblk had better be 1.  In this case, we write only
 * the part of the buffer between firstdata and lastdata.
 *
 * When nblk > 1, _cch_wrabuf assumes that all page buffers get written to
 * contiguous parts of the file.
 *
 * Return value:
 *
 *  On normal completion 0 is returned.  -1 is returned if an error is
 *  encountered, with the status set in stat.
 *
 * Side effects:
 *
 *  The CCH_DIRTY bit is cleared for all affected buffers.  If called
 *  in asynchronous mode, the buffers are placed in CCH_WRITING state.
 *
 * NOTE(review): the visible text of this function is truncated here;
 * the synchronous ('s') branch continues outside this chunk.
 */
_cch_wrabuf(
struct cch_f *cch_info,     /* cache info */
struct fdinfo *llfio,       /* fdinfo pointer for underlying layer */
struct cch_buf *bc,         /* buffer control block */
int bytes,                  /* number of bytes per page buffer */
off_t bytoff,               /* byte offset within file */
int64 nblk,                 /* number of contiguous buffers to flush */
off_t *eof,                 /* on input, contains the bit size of the
                             * underlying file layer.  On output, this
                             * size is updated if the file is extended. */
char syncasync,             /* 's' for sync request, 'a' for async */
struct ffsw *stat           /* io completion status structure */
)
{
    int i;
    ssize_t ret;
    int ubc;                    /* unused bit count for the lower write */
    size_t tbytes, bytleft;     /* tbytes: total bytes in this flush */
    size_t saveamt = 0;
    off_t end_of_data;
    struct fflistreq list_array[1];
    char *bufptr;

    CCH_DEBUG(("_cch_wrabuf EN: bytes=%d (0%o) bytoff=%d (0%o) \n",
        bytes,bytes,bytoff,bytoff));

    if (bc->firstdata || bc->lastdata) {
        /* partial-page flush: only the [firstdata, lastdata) span */
        assert(nblk <= 1);
        tbytes = (size_t)((bc->lastdata - bc->firstdata)/8);
        bytoff = bytoff + bc->firstdata/8;
        bufptr = BPTR2CP(bc->buf) + bc->firstdata/8;
    }
    else {
        tbytes = bytes * nblk;
        bufptr = BPTR2CP(bc->buf);
    }
#ifdef __mips
    /* oversized O_DIRECT requests must go synchronously */
    if (cch_info->odirect && tbytes > cch_info->maxiosize){
        syncasync = 's';
    }
#endif
    ubc = 0;
    if (syncasync == 'a') {
        /*
         * Seek to proper location
         */
        if (XRCALL(llfio,seekrtn) llfio, bytoff, SEEK_SET, stat) == ERR)
            return(ERR);
        /*
         * Start an asynchronous write.
         */
        CLRFFSTAT(bc->sw);
        ret = XRCALL(llfio,writeartn) llfio, CPTR2BP(bufptr), tbytes,
            &bc->sw, PARTIAL, &ubc);
        if (ret == ERR) {
            ERETURN(stat,bc->sw.sw_error,0);
        }
        bc[0].lnkcnt = nblk;        /* cnt of linked bufs */
        for (i=0; i<nblk; i++) {
            bc[i].flags |= CCH_WRITING; /* update buffer stat */
            bc[i].flags &= ~CCH_DIRTY;  /* clear dirty flag */
            bc[i].lnk = i;              /* chain several bufs */
        }
    }
    else {
/*
 * This is the tape layer for Irix systems.
 * When the tape is in variable block mode, each user's record
 * corresponds to a block on tape.
 * This is accomplished by writing the record with 1 write statement.
 * The tape layer's buffer is big enough to hold 1 record.
 * If we get a full write, and nothing else is in the buffer for
 * this record, we can skip copying to the library buffer, and write it
 * directly from the user's space.
 * If we get a partial write, we need to copy to the library buffer.
 * Parameters:
 *  fio     - Pointer to fdinfo block
 *  bufptr  - bit pointer to where data is to go.
 *  nbytes  - Number of bytes to be written
 *  stat    - pointer to status return word
 *  fulp    - full or partial write mode flag
 *  ubc     - pointer to unused bit count (not used for IBM)
 */
ssize_t
_tmf_write(
struct fdinfo   *fio,
bitptr          bufptr,
size_t          nbytes,
struct ffsw     *stat,
int             fulp,
int             *ubc)
{
    register int    errn;
    ssize_t         ret;
    struct tmfio    *xfinfo;

    /* data must be byte-aligned, with no unused bits */
    if ((BPBITOFF(bufptr) & 07) != 0) {
        errn = FDC_ERR_REQ;
        goto eret;
    }
    if (*ubc != 0) {
        errn = FDC_ERR_UBC;
        goto eret;
    }
    xfinfo = (struct tmfio *)fio->lyr_info;
    /*
     * If we've been reading, then try to switch the buffer into write mode.
     */
    if (xfinfo->rwflag == READIN) {
        /*
         * Issue an error if we are not positioned at a record
         * boundary.  ffweof would terminate the current record, but
         * _cos_write overwrites the current record.  We need to
         * decide which is the proper approach before permitting this
         * here.
         */
        if (xfinfo->tmf_base != xfinfo->tmf_bufptr) {
            errn = FDC_ERR_NOTREC;
            goto eret;
        }
        ret = _tmf_wrard(fio, stat);
        if (ret < 0)
            return(ERR);
    }
    if (xfinfo->tmf_tpos) {
        /* wait for any outstanding tape positioning request */
        if (_tmf_tpwait(xfinfo) < 0) {
            ERETURN(stat, errno, 0);
        }
    }
    xfinfo->rwflag = WRITIN;
    if (xfinfo->tmf_speov) {
        /* We're in special processing. */
        /* Reset counter of blocks on tape */
        xfinfo->spblocks = 0;
        /* If we've read anything from buffer memory, then mark it */
        /* all gone. - for now we don't need to worry about */
        /* this, because we can only have 1 block in buffer memory. */
        /* But if we every do async i/o, this could be a problem. */
    }
    if ((xfinfo->tmf_bufptr == xfinfo->tmf_base) && fulp == FULL) {
        /*
         * This is the entire record, so just write it out
         */
        LOOP_SYSCALL(ret, write(xfinfo->tmf_fd, BPTR2CP(bufptr),
            nbytes));
        if (ret != nbytes) {
            if (xfinfo->tmf_eovon && !xfinfo->tmf_speov) {
                /* The user has enabled eov processing. */
                /* Determine whether we hit EOV. */
                int err;
                if (_tmf_ateov(fio,xfinfo, nbytes, BPTR2CP(bufptr),
                    ret, stat, &err)) {
                    /* This is eov */
                    /* We need to save away the */
                    /* unwritten part of the data, */
                    /* and set a flag so we can */
                    /* tell the user eov was reached. */
                    /* This user's write will return */
                    /* a good status. */
                    return(_tmf_eovseen(xfinfo, nbytes,
                        BPTR2CP(bufptr), ret, stat));
                }
                if (err != 0) {
                    ERETURN(stat, err, ret);
                }
                /* We were able to rewrite the block. */
                /* Carry on. */
            }
            else {
                if (ret < 0) {
                    ERETURN(stat, errno, 0);
                }
                else{
                    ERETURN(stat, FDC_ERR_WRTERR, ret);
                }
            }
        }
        SETSTAT(stat, FFEOR, ret);
        return(ret);
    }
    /*
     * This must not be the entire record.  So, we need to copy it
     * to our library buffer.
     */
    if (nbytes + xfinfo->tmf_cnt > xfinfo->tmf_bufsiz) {
        ERETURN(stat, FDC_ERR_MXBLK, 0);
    }
    memcpy(xfinfo->tmf_bufptr, BPTR2CP(bufptr), nbytes);
    xfinfo->tmf_cnt += nbytes;
    xfinfo->tmf_bufptr += nbytes;
    if (fulp == FULL) {
        /* record is complete: flush the accumulated block to tape */
        LOOP_SYSCALL(ret, write(xfinfo->tmf_fd, xfinfo->tmf_base,
            xfinfo->tmf_cnt));
        xfinfo->tmf_bufptr = xfinfo->tmf_base;
        if (ret != xfinfo->tmf_cnt) {
            if (xfinfo->tmf_eovon && !xfinfo->tmf_speov) {
                int err;
                if (_tmf_ateov(fio,xfinfo, xfinfo->tmf_cnt,
                    xfinfo->tmf_base, ret, stat, &err)) {
                    /* This is eov */
                    /* We need to save away the */
                    /* unwritten part of the data, */
                    /* and set a flag so we can */
                    /* tell the user eov was reached. */
                    /* This write will return OK */
                    return(_tmf_eovseen(xfinfo, xfinfo->tmf_cnt,
                        xfinfo->tmf_base, ret, stat));
                }
                if (err != 0) {
                    ERETURN(stat, err, ret);
                }
                /* We were able to rewrite the block. */
                /* Carry on. */
            }
            else {
                xfinfo->tmf_cnt = 0;
                if (ret < 0) {
                    ERETURN(stat, errno, 0);
                }
                else{
                    ERETURN(stat, FDC_ERR_WRTERR, ret);
                }
            }
        }
        xfinfo->tmf_cnt = 0;
        SETSTAT(stat, FFEOR, nbytes);
        return(nbytes);
    }
    else {
        /* PARTIAL write: data stays buffered until the record ends */
        SETSTAT(stat, FFCNT, nbytes );
        return(nbytes);
    }

eret:
    ERETURN(stat, errn, 0);
}
/*
 * Open routine for the generic record-blocking layer.
 *
 * Validates the record size (rsz) and maximum block size (mbs) from
 * the spec against the per-format limits table, applies per-format
 * defaulting rules, allocates the working buffer and the gen_ff
 * private area, then opens the lower layers.
 *
 * Returns the lower layer's _ffopen_t, or _FFOPEN_ERR with stat set.
 */
_ffopen_t
_gen_fopen(
const char  *name,
int         flags,
mode_t      mode,
struct fdinfo *fio,
union spec_u *spec,
struct ffsw *stat,
long        cbits,
int         cblks,
struct gl_o_inf *oinf)
{
    char *ptr;
    union spec_u *nspec;
    long recsize, blksize;      /* bits */
    long rsz, mbs;              /* bytes */
    _ffopen_t nextfio;
    int rtype;
    struct gen_ff *ff_dat;

    /*
     * convert 8-bit bytes to bits
     */
    rsz = spec->fld.recsize;
    mbs = spec->fld.mbs;
    rtype = spec->fld.recfmt;
    if (rtype < 0 || rtype >= NUM_F_TYPES) {
        _SETERROR(stat, FDC_ERR_BADSPC, 0);
        return(_FFOPEN_ERR);
    }
    /*
     * General limit checks from table.
     */
    if (rsz == 0) {
        _SETERROR(stat, FDC_ERR_BADSPC, 0);
        goto badret;
    }
    if (rsz < _F_limits[rtype].min_rsz ||
        rsz > _F_limits[rtype].max_rsz) {
        _SETERROR(stat, FDC_ERR_BADSPC, 0);
        goto badret;
    }
    if (mbs != 0)       /* mbs == 0 means "unspecified": default later */
        if (mbs < _F_limits[rtype].min_mbs ||
            mbs > _F_limits[rtype].max_mbs) {
            _SETERROR(stat, FDC_ERR_BADSPC, 0);
            goto badret;
        }
    switch(rtype) {
    case TR_IBM_F:
        /*
         * if mbs and rsz specified with
         * F format and mbs != rsz then error
         */
        if (mbs != rsz && mbs != 0) {
            _SETERROR(stat, FDC_ERR_BADSPC, 0);
            goto badret;
        }
        /* fallthrough: F shares the FB defaulting/multiple checks */
    case TR_IBM_FB:
        if (mbs == 0)
            mbs = rsz;          /* dflt mbs = rsz */
        /* must be exact multiple */
        if ((mbs % rsz) != 0) {
            _SETERROR(stat, FDC_ERR_BADSPC, 0);
            goto badret;
        }
        break;
    case TR_VMS_F_DSK:
    case TR_VMS_F_TP:
    case TR_VMS_F_TR:
        if (mbs == 0)               /* unspecified */
        {                           /* deflt mbs=rsz */
            if (rtype != TR_VMS_F_TP)   /* deflt mbs=rsz */
                mbs = rsz;
            else if(rtype == TR_VMS_F_TP) { /* dflt mbs=2048 */
                mbs = 2048;
                if (rsz > mbs)
                    mbs = rsz;
            }
        }
        if (rsz > mbs) {
            _SETERROR(stat, FDC_ERR_BADSPC, 0);
            goto badret;
        }
        break;
    default:
        _SETERROR(stat, FDC_ERR_BADSPC, 0);
        goto badret;
    }
    recsize = rsz << 3;
    blksize = mbs << 3;
    /*
     * Internally, both blksize and recsize are in bits!
     */
    fio->maxrecsize = recsize;
    fio->maxblksize = blksize;
    fio->_ffbufsiz = blksize;   /* bit size of buffer */
    /*
     * Allocate buffer
     */
    ptr = malloc((blksize >> 3) + 16);  /* bits -> bytes, plus slop */
    if (ptr == NULL) goto nomem;
    /*
     * Allocate private data area
     */
    fio->lyr_info = (char *)calloc(sizeof(struct gen_ff), 1);
    if (fio->lyr_info == NULL) goto nomem;

    /* load up record characteristics */
    ff_dat = (struct gen_ff *)fio->lyr_info;
    *ff_dat = _Frec_def_tab[rtype];

    SET_BPTR(fio->_base, CPTR2BP(ptr));
    fio->rwflag = POSITIN;
    fio->segbits = 0;
    fio->_cnt = 0;
    fio->_ptr = fio->_base;
    /*
     * First, open the lower layers
     */
    nspec = spec;
    NEXT_SPEC(nspec);
    nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks,
        NULL, oinf);
    if (nextfio == _FFOPEN_ERR) goto badret;

    DUMP_IOB(fio);              /* debugging only */
    return(nextfio);

nomem:
    _SETERROR(stat, FDC_ERR_NOMEM, 0);
    /* fall through to common cleanup */
badret:
    if (BPTR2CP(fio->_base) != NULL)
        free(BPTR2CP(fio->_base));
    if (fio->lyr_info != NULL)
        free(fio->lyr_info);
    return(_FFOPEN_ERR);
}
/*
 * Write a f77 class file
 * Parameters:
 *  fio     - Pointer to fdinfo block
 *  bufptr  - bit pointer to where data is to go.
 *  nbytes  - Number of bytes to be written
 *  stat    - pointer to status return word
 *  fulp    - full or partial write mode flag
 *  ubc     - pointer to unused bit count (not used for IBM)
 *
 * An f77 record is framed by RDWLEN-byte control words holding the
 * record length; this routine writes/patches the leading control word
 * as data arrives and streams the record payload through the layer's
 * buffer (or, for large aligned requests, directly from user space).
 *
 * NOTE(review): the visible text of this function is truncated here;
 * the statements after the copy loop lie outside this chunk.
 *
 * Fix: the expression `(char *)&ii` had been corrupted by HTML-entity
 * mangling into `(char *)ⅈ` (three places); the original token
 * sequence is restored so the file compiles again.
 */
ssize_t
_f77_xwrite(
struct fdinfo   *fio,
bitptr          bufptr,
size_t          nbytes,
struct ffsw     *stat,
int             fulp,
int             *ubc)
{
    ssize_t ret;
    size_t bytomove, moved, bytes;
    struct f77_xf *xfinfo;
    struct fflistreq list_array[1];
    long left;              /* free space remaining in the buffer */
    char *cbufptr;          /* user data as a character pointer */
    long ii;                /* record-length control word value */
    char *cb;               /* byte cursor over the control word */
    int ijk;
    int cwbytes;            /* control-word bytes still to emit */
    int ernum;
    int zero = 0;

    cbufptr = BPTR2CP(bufptr);
    /* data must be byte-aligned, with no unused bits */
    if ((BPBITOFF(bufptr) & 07) != 0) {
        ernum = FDC_ERR_REQ;
        goto eret;
    }
    if (*ubc != 0){
        ernum = FDC_ERR_UBC;
        goto eret;
    }
    xfinfo = (struct f77_xf *)fio->lyr_info;
    /*
     * If we've been reading, then try to switch the buffer into write mode.
     */
    if (fio->rwflag == READIN) {
        /*
         * Issue an error if we are not positioned at a record
         * boundary.  ffweof would terminate the current record, but
         * _cos_write overwrites the current record.  We need to
         * decide which is the proper approach before permitting this
         * here.
         */
        if (!(xfinfo->flag & ATEOR) && !(fio->ateod ) && !(fio->ateof)) {
            ernum = FDC_ERR_NOTREC;
            goto eret;
        }
        ret = f77_xwrard(fio, stat);
        if (ret < 0)
            return(ERR);
    }
    fio->rwflag = WRITIN;
    /*
     * initialize a new record, if needed.
     */
    bytomove = nbytes;
    moved = 0;
    /*
     * Check for record size exceeded.
     */
    if (bytomove > 0) {
        if ((xfinfo->maxrecsize > 0) &&
            (xfinfo->recbytes + bytomove) > xfinfo->maxrecsize){
            ernum = FDC_ERR_MXREC;
            goto eret;
        }
    }
    if (xfinfo->recbytes == 0) {
        /* This is the start of the record */
        ii = bytomove;      /* first guess at the record length */
        if (fio->rtype == TR_UX_MIPS) {SWAPB(ii);}
        if ((bytomove > 0) || (fulp == FULL)) {
            /*
             * Put our guess at a control word in the buffer.
             * This is the control word at the beginning of record.
             */
            cwbytes = RDWLEN;
            cb = (char *)&ii;
#if !(defined(_LITTLE_ENDIAN) && defined(_LP64))
            cb += sizeof(ii) - RDWLEN;
            /* The control word is only RDWLEN bytes long */
#endif
            if ((xfinfo->_cnt + RDWLEN) >= xfinfo->_ffbufsiz) {
                /* only part of the control word will fit */
                /* in this buffer.  Insert what will fit. */
                for (ijk = 0; ijk < xfinfo->_ffbufsiz - xfinfo->_cnt;
                    ijk++){
                    *(xfinfo->_ptr++) = *cb++;
                    cwbytes--;
                }
                /* buffer is full.  write it out. */
                if (_f77_put_block(fio, stat,
                    (size_t)xfinfo->_ffbufsiz) != 0)
                    return(ERR);
            }
            for (ijk = 0; ijk < cwbytes; ijk++){
                *(xfinfo->_ptr++) = *cb++;
            }
            xfinfo->_cnt += cwbytes;
            xfinfo->recbytes += RDWLEN;
            xfinfo->cwwritten = 1;
        }
    }
    else {
        /* This record has already been started. */
        ii = (xfinfo->recbytes + bytomove - RDWLEN) ;
        if (fio->rtype == TR_UX_MIPS) {SWAPB(ii);}
        if (bytomove != 0) {
            /*
             * If the control word at the start of the
             * record is in the buffer, update it.
             */
            if (xfinfo->recbytes <= xfinfo->_cnt){
                char *tbptr;
                /* the whole control word is in the buffer */
                cb = (char *)&ii;
#if !(defined(_LITTLE_ENDIAN) && defined(_LP64))
                cb += sizeof(ii) - RDWLEN;
                /* The control word is only RDWLEN bytes long */
#endif
                tbptr = xfinfo->_ptr - xfinfo->recbytes;
                for (ijk = 0; ijk < RDWLEN; ijk++)
                    *(tbptr++) = *cb++;
                xfinfo->cwwritten = 1;
            }
            else if ((xfinfo->recbytes - RDWLEN) <= xfinfo->_cnt){
                char *tbptr;
                int istart;
                /* part of the control word is in the buffer */
                /* Insert what will fit. */
                cb = (char *)&ii;
#if !(defined(_LITTLE_ENDIAN) && defined(_LP64))
                cb += sizeof(ii) - RDWLEN;
                /* The control word is only RDWLEN bytes long */
#endif
                istart = xfinfo->recbytes -xfinfo->_cnt;
                cb += istart;
                tbptr = xfinfo->_base;
                for (ijk = istart; ijk < RDWLEN; ijk++)
                    *(tbptr++) = *cb++;
                xfinfo->cwwritten = 0;  /* 0 because this is */
                                        /* not the whole thing*/
            }
            else
                xfinfo->cwwritten = 0;
        }
    }
    /*
     * loop putting data in buffer
     */
    while (bytomove > 0) {
        /*
         * bytes tells when data has been moved.  Set it to zero
         * unless someone moves some data in the loop
         */
        /*
         * If enough room for bytes, put them in the buffer
         */
        left = xfinfo->_ffbufsiz - xfinfo->_cnt;
        if (left == 0) {
            if (_f77_put_block(fio, stat, (size_t)xfinfo->_cnt) != 0)
                return(ERR);
            left = xfinfo->_ffbufsiz;
#ifdef __CRAY
#pragma _CRI inline _f77_aligned
#elif defined(__mips) || defined(_LITTLE_ENDIAN)
#pragma inline _f77_aligned
#endif
            if ((bytomove >= left) && _f77_aligned(cbufptr)) {
                /* We write directly from the user's buffer */
                bytes = bytomove - bytomove%xfinfo->_ffbufsiz;
                ret = XRCALL(fio->fioptr, writertn) fio->fioptr,
                    CPTR2BP(cbufptr), bytes, stat, PARTIAL, &zero);
                if (ret != bytes){
                    return(ERR);
                }
                bytomove -= bytes;
                cbufptr += bytes;
                moved += bytes;
            }
        }
        bytes = (bytomove < left)? bytomove : left;
        memcpy(xfinfo->_ptr, cbufptr, bytes);
        xfinfo->_cnt += bytes;
        xfinfo->_ptr += bytes;
        cbufptr += bytes;
        bytomove -= bytes;
        moved += bytes;
    }
/*
 * _any_sds_fr_mem moves data into a secondary data segment (SDS) from
 * user memory.
 *
 * Unlike _sds_fr_mem, _any_sds_fr_mem handles moving a number of bits that
 * may not be a multiple of 512 (BITPBLOCK), and a user buffer that is not
 * word-aligned.  Partial or unaligned blocks are staged through a local
 * one-block buffer with a read-modify-write of the affected SDS block.
 *
 * Returns 0 on normal return, or else -1 with error code in errno.
 */
_any_sds_fr_mem(
bitptr	sdsaddr,	/* SDS bit address where data will be received */
bitptr	ubuf,		/* user buffer containing the data to move */
int	nbits		/* number of bits to move */
)
{
	int	sds_bit_offset;		/* bit offset of sdsaddr from SDS base */
	int	sds_bit_offset_blk;	/* sds_bit_offset rounded down to a block */
	int	rbits;			/* bits handled in the current step */
	char	localbuf[BYTPBLOCK];	/* one-block staging buffer */
	bitptr	locptr;
	long	*uwaddr;
	char	*ucaddr;

	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	if (sds_bit_offset & (BITPBLOCK -1)) {
		/* The sds address is not on a block boundary. */
		/* Read data from sds to a local buffer.  Copy the */
		/* user's memory to the appropriate part of the local */
		/* buffer, and write it back out to sds. */
		sds_bit_offset_blk = (sds_bit_offset & ~(BITPBLOCK - 1));
		if (ssread((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset_blk), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		/* bits that fall inside this first (partial) block */
		rbits = MIN(nbits, BITPBLOCK - (sds_bit_offset - sds_bit_offset_blk));
		locptr = CPTR2BP(localbuf);
		SET_BPTR(locptr, INC_BPTR(locptr, sds_bit_offset - sds_bit_offset_blk));
		MOV_BITS(locptr, ubuf, rbits);	/* merge user data into block image */
		SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
		nbits -= rbits;
		if(sswrite((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset_blk), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
		if (nbits == 0)
			return(0);
		/* remainder of the transfer must start on a block boundary */
		assert(((SUBT_BPTR(sdsaddr, WPTR2BP(0))) & (BITPBLOCK -1)) == 0);
	}
	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	uwaddr = BPTR2WP(ubuf);
	ucaddr = BPTR2CP(ubuf);
	if ((nbits & (BITPBLOCK-1)) || (ucaddr != (char *)uwaddr)){
		int	left;
		/* Either we are not writing a multiple of blocks or */
		/* the user's address is not word-aligned. */
		locptr = CPTR2BP(localbuf);
		/* round down nbits to a block boundary */
		rbits = nbits & ~(BITPBLOCK-1);
		if (rbits) {
			if (ucaddr != (char*)uwaddr) {
				/* ubuf is not word aligned.  Stage each */
				/* block through the local buffer, since */
				/* _sds_fr_mem/sswrite require word-aligned */
				/* user addresses (see FDC_ERR_SDSWB). */
				left = rbits;
				sds_bit_offset_blk = BITS2BLOCKS(sds_bit_offset);
				while (left > 0) {
					if( ssread((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					MOV_BITS(locptr, ubuf, BITPBLOCK);
					SET_BPTR(ubuf, INC_BPTR(ubuf, BITPBLOCK));
					if( sswrite((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, BITPBLOCK));
					sds_bit_offset_blk++;
					left-= BITPBLOCK;
				}
			}
			else {
				/* word-aligned whole blocks: move directly */
				if (_sds_fr_mem(sdsaddr, ubuf, rbits) == -1) {
					return(-1);
				}
				SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
				SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
			}
			sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
		}
		/* Get last block into local memory.  Merge in user's memory */
		/* and write it back out to sds. */
		if( ssread((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		MOV_BITS(locptr, ubuf, nbits - rbits);
		if( sswrite((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	else {
		/* block-multiple and word-aligned: one direct sswrite */
		if(sswrite(uwaddr, BITS2BLOCKS(sds_bit_offset),
		   BITS2BLOCKS(nbits)) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	return(0);
}
/*
 * _any_mem_fr_sds moves data into user memory from a secondary data
 * segment (SDS).
 *
 * Unlike _mem_fr_sds, _any_mem_fr_sds handles moving a number of bits that
 * may not be a multiple of 512 (BITPBLOCK), and a user buffer that is not
 * word-aligned.  Partial or unaligned blocks are staged through a local
 * one-block buffer.
 *
 * Returns 0 on normal return, or else -1 with error code in errno.
 */
_any_mem_fr_sds(
bitptr	ubuf,		/* user buffer to receive data */
bitptr	sdsaddr,	/* SDS bit address of data */
int	nbits		/* number of bits to move */
)
{
	int	sds_bit_offset;		/* bit offset of sdsaddr from SDS base */
	int	sds_bit_offset_blk;	/* sds_bit_offset rounded down to a block */
	int	rbits;			/* bits handled in the current step */
	char	localbuf[BYTPBLOCK];	/* one-block staging buffer */
	bitptr	locptr;
	long	*uwaddr;
	char	*ucaddr;

	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	if (sds_bit_offset & (BITPBLOCK -1)) {
		/* The sds address is not on a block boundary. */
		/* Read data from sds to a local buffer.  Copy the */
		/* appropriate part of the local buffer to user's memory. */
		sds_bit_offset_blk = (sds_bit_offset & ~(BITPBLOCK - 1));
		if(ssread((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset_blk), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		/* bits that fall inside this first (partial) block */
		rbits = MIN(nbits, BITPBLOCK - (sds_bit_offset - sds_bit_offset_blk));
		locptr = CPTR2BP(localbuf);
		SET_BPTR(locptr, INC_BPTR(locptr, sds_bit_offset - sds_bit_offset_blk));
		MOV_BITS(ubuf, locptr, rbits);	/* copy out to user memory */
		SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
		nbits -= rbits;
		SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
		if (nbits == 0)
			return(0);
		/* Verify that our sds address is now on a block boundary */
		assert (((SUBT_BPTR(sdsaddr, WPTR2BP(0))) & (BITPBLOCK -1)) == 0);
	}
	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	uwaddr = BPTR2WP(ubuf);
	ucaddr = BPTR2CP(ubuf);
	if ((nbits & (BITPBLOCK-1)) || (ucaddr != (char *)uwaddr)){
		int	left;
		/* Either we are not reading in a multiple of blocks or */
		/* the user's address is not word-aligned. */
		/* Round nbits down to a block boundary and */
		/* move those to user's memory. */
		locptr = CPTR2BP(localbuf);
		rbits = nbits & ~(BITPBLOCK-1);
		if (rbits) {
			if (ucaddr != (char*)uwaddr) {
				/* ubuf is not word aligned. */
				/* Read the data from sds into a local */
				/* buffer and copy to the user's memory */
				/* one block at a time. */
				left = rbits;
				sds_bit_offset_blk = BITS2BLOCKS(sds_bit_offset);
				while (left > 0) {
					if (ssread((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					MOV_BITS(ubuf, locptr, BITPBLOCK);
					SET_BPTR(ubuf, INC_BPTR(ubuf, BITPBLOCK));
					SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, BITPBLOCK));
					sds_bit_offset_blk++;
					left-= BITPBLOCK;
				}
			}
			else {
				/* word-aligned whole blocks: read directly */
				/* into the user's buffer */
				if (ssread(uwaddr, BITS2BLOCKS(sds_bit_offset),
					BITS2BLOCKS(rbits)) == -1) {
					errno = FDC_ERR_SDSIO;
					return(-1);
				}
				SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
				SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
			}
			sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
		}
		/* get last block into local memory and */
		/* transfer to user's memory */
		if (ssread((int *)localbuf,
			BITS2BLOCKS(sds_bit_offset), 1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		assert((nbits - rbits) < BITPBLOCK);
		MOV_BITS(ubuf, locptr, nbits - rbits);
	}
	else {
		/* block-multiple and word-aligned: one direct ssread */
		if(ssread(uwaddr, BITS2BLOCKS(sds_bit_offset),
			BITS2BLOCKS(nbits)) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	return(0);
}
/*
 * _sys_write
 *
 * System-call layer write routine.  Writes nbytes bytes from the
 * byte-aligned buffer bufptr to the underlying file descriptor,
 * resuming after partial writes until everything is transferred.
 *
 * Returns the byte count transferred (also placed in retstat), or
 * an error return via ERETURN with the error code in retstat.
 */
ssize_t
_sys_write(
struct fdinfo	*fio,
bitptr		bufptr,
size_t		nbytes,
struct ffsw	*retstat,
int		fulp,
int		*ubc)
{
	struct sys_f	*si = (struct sys_f *)fio->lyr_info;
	char		*cp;		/* current position in caller's data */
	size_t		remain;		/* bytes not yet written */
	ssize_t		done = 0;	/* bytes written so far */
	ssize_t		ret;

	cp = BPTR2CP(bufptr);
	/* Only byte-aligned data with a zero unused-bit count is supported. */
	if ((BPBITOFF(bufptr) & 7) != 0 || *ubc != 0)
		ERETURN(retstat, FDC_ERR_UBC, 0);

	remain = nbytes;
#ifdef	__mips
	/*
	 * If our last i/o was asynchronous, then our file position
	 * won't be what we expect.  Seek to the right position.  We
	 * could use a pwrite instead of seeking, but that would also
	 * not update the file position.  I'm doing this because it seems
	 * to me most 'expected' for the system call layer.
	 */
	if (si->needpos) {
		if (lseek(fio->realfd, si->curpos, 0) < 0)
			ERETURN(retstat, errno, done);
		si->needpos = 0;
	}
#endif
	if (remain > 0) {
#ifdef	__mips
		/* files opened for append always write at end of file */
		if (si->oappend)
			si->curpos = si->endpos;
#endif
		for (;;) {
			if (si->nointrio)
				ret = write(fio->realfd, cp, remain);
			else {
				LOOP_SYSCALL(ret,
					write(fio->realfd, cp, remain));
			}
			if (ret < 0)
				ERETURN(retstat, errno, done);
#ifdef	__mips
			si->curpos += ret;
			if (si->curpos > si->endpos)
				si->endpos = si->curpos;
#endif
			done += ret;
			/*
			 * The assumption is made here that the system will
			 * never return zero bytes on a non-zero request
			 * without an error!
			 */
			if (done >= nbytes)
				break;
			/* partial write: advance and retry the remainder */
			cp += ret;
			remain -= ret;
		}
	}
	SETSTAT(retstat, FFCNT, done);
	return (done);
}
_ffopen_t _gen_xopen( const char *name, int flags, mode_t mode, struct fdinfo *fio, union spec_u *spec, struct ffsw *stat, long cbits, int cblks, struct gl_o_inf *oinf ) { char *ptr; union spec_u *nspec; int blksize; _ffopen_t nextfio; int isvalid; struct gen_xf *xf_info; /* * Allocate private storage */ xf_info = (struct gen_xf *)calloc(sizeof(struct gen_xf),1); if (xf_info == NULL) goto nomem; fio->lyr_info = (char *)xf_info; /* * select parameters based on record type */ switch(fio->rtype) { case TR_NVE_V: xf_info->rdwlen = 112; /* bits */ break; case TR_CRAY_V: xf_info->rdwlen = 64; /* bits */ break; #ifdef _OLD_F77 case TR_UX_VAX: case TR_UX_SUN: xf_info->rdwlen = 32; /* bits */ break; #endif case TR_205_W: xf_info->rdwlen = 64; /* bits */ break; } xf_info->last_lrdwaddr = 0; xf_info->lrdwaddr = 0; /* * Record the maximum record size in bits. * A value of 0 is stored if this is unspecified. */ fio->maxrecsize = _ff_nparm_getv(spec, 1, &isvalid) * 8; /* * Record the buffer size in bits. */ blksize = _ff_nparm_getv(spec, 2, &isvalid) * 8; if (! isvalid || blksize < 256) /* bits, mighty small! */ blksize = X_BUFSIZ * BITPBLOCK; else blksize = (blksize + 077) & (~077);/* round to word size */ /* * Although the _ffbufsiz field is declared as long, * these routines use GETDATA and PUTDATA. Those macros * assign the amount to be written to integers. So, to * make this all work we need to be sure that the buffer size * does not exceed the size of an integer. 
*/ if (blksize > (1<<sizeof(int)*8-5)){ _SETERROR(stat, FDC_ERR_BUFSIZ, 0); goto badret; } fio->_ffbufsiz = blksize; /* bit size of buffer */ ptr = malloc((blksize >> 3) + 16); if (ptr == NULL) goto nomem; SET_BPTR(fio->_base, CPTR2BP(ptr)); fio->scc = SCCFULL; fio->lastscc = SCCFULL; fio->rwflag = POSITIN; fio->segbits = 0; fio->_cnt = 0; fio->_ptr = fio->_base; /* * Now, open the lower layers */ nspec = spec; NEXT_SPEC(nspec); nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks, NULL, oinf); if (nextfio < 0) goto badret; DUMP_IOB(fio); /* debugging only */ return(nextfio); nomem: _SETERROR(stat, FDC_ERR_NOMEM, 0); badret: if (BPTR2CP(fio->_base) != NULL) free(BPTR2CP(fio->_base)); if (fio->lyr_info != NULL) free(fio->lyr_info); return(_FFOPEN_ERR); }