ffreada(int fd, char *buf, int nbytes, struct ffsw *stat, ...)
{
	struct fdinfo *fio;
	int ret, locfulp;
	bitptr bufptr;
	int locubc, *pubc, na;	/* need a place to put result */
	va_list ap;

	fio = GETIOB(fd);
	SET_BPTR(bufptr, CPTR2BP(buf));

	/* adjust number of bits requested if ubc passed in */
	NUMARG(na);
	locubc = 0;
	pubc = &locubc;
	locfulp = FULL;
	if (na < 4 || na > 6) {
		errno = FDC_ERR_NOPARM;
		return(ERR);
	}
	va_start(ap, stat);
	if (na > 4)
		locfulp = va_arg(ap, int);
	if (na > 5)
		pubc = va_arg(ap, int *);

	CHECK_FIOPTR(fio, stat);
	ret = XRCALL(fio, readartn) fio, bufptr, nbytes, stat, locfulp, pubc);
	return (ret);
}
int ffread(int fd, char *buf, int nbytes, ...)
#endif
{
	struct fdinfo *fio;
	ssize_t ret;
	int locfulp;
	bitptr bufptr;
	int locubc, *pubc, na;		/* need a place to put result */
	struct ffsw locstat, *pstat;	/* need a place to put result */
#if !defined(__mips) && !defined(_LITTLE_ENDIAN)
	va_list ap;
#endif

	fio = GETIOB(fd);
	SET_BPTR(bufptr, CPTR2BP(buf));

	/* adjust number of bits requested if ubc passed in */
#ifdef _CRAY
	NUMARG(na);
#elif defined(__mips) || defined(_LITTLE_ENDIAN)
	na = 3;
#endif
	locubc = 0;
	pubc = &locubc;
	locfulp = FULL;
	pstat = &locstat;
#if !defined(__mips) && !defined(_LITTLE_ENDIAN)
	va_start(ap, nbytes);
	if (na < 3 || na > 6) {
		errno = FDC_ERR_NOPARM;
		return(ERR);
	}
	if (na > 3)
		pstat = va_arg(ap, struct ffsw *);
	if (na > 4)
		locfulp = va_arg(ap, int);
	if (na > 5)
		pubc = va_arg(ap, int *);
#endif

	CHECK_FIOPTR(fio, pstat);
	ret = XRCALL(fio, readrtn) fio, bufptr, nbytes, pstat, locfulp, pubc);
	if (na < 4)
		errno = locstat.sw_error;
	return (ret);
}
/*
 * This routine is like ffread, except that it requires all parameters
 * to be passed explicitly.
 * If ubc == NULL, then do not return ubc information to the user.
 */
ssize_t
ffreadf(int fd, void *buf, size_t nbytes, struct ffsw *pstat, int locfulp,
	int *ubc)
{
	struct fdinfo *fio;
	ssize_t ret;
	bitptr bufptr;
	int locubc, *pubc;

	if (ubc == NULL) {
		pubc = &locubc;
		locubc = 0;
	}
	else {
		pubc = ubc;
	}
	fio = GETIOB(fd);
	SET_BPTR(bufptr, CPTR2BP(buf));

	CHECK_FIOPTR(fio, pstat);
	ret = XRCALL(fio, readrtn) fio, bufptr, nbytes, pstat, locfulp, pubc);
	return (ret);
}
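/*
 * Usage sketch (not part of the layer): how the entry points above are
 * meant to be called.  "fd" is assumed to be an FFIO descriptor already
 * obtained from ffopen(); the optional trailing arguments to ffread()
 * follow the order handled by the variadic code above: status word,
 * fulp flag, then ubc pointer.
 */
#if 0	/* illustrative only; excluded from compilation */
static void
ffread_usage_example(int fd)
{
	char		data[4096];
	struct ffsw	status;
	int		ubc = 0;
	ssize_t		nread;

	/* Short form: optional arguments omitted, errors reported via errno. */
	nread = ffread(fd, data, sizeof(data));

	/* Fully specified form, equivalent to what ffreadf() takes directly:
	 * status word, FULL record mode, and an unused-bit-count pointer. */
	nread = ffreadf(fd, data, sizeof(data), &status, FULL, &ubc);
	if (nread < 0)
		/* status.sw_error holds the FDC_* error code */ ;
}
#endif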
/*
 * get_segment grabs another record segment from the data stream.
 *	fio->scc will be set as follows:
 *		SCCMIDL	fio->segbits = fio->_cnt (all or rest of data in fio
 *			buffer) and no EOR was found
 *		SCCFULL	an EOR was found
 *
 *	return values are as follows:
 *		2	encountered end of data -> stat will be set
 *		1	encountered end of file -> stat will be set
 *		0	segment (or part) is received -> stat will NOT be set
 *		ERR	encountered an error -> stat will be set
 */
static int
get_segment(struct fdinfo *fio, struct ffsw *stat)
{
	long tword;
	int left;
	int zero;		/* unused bit count returned by READBLK */
	ssize_t ret;
	unsigned char *cp;
	bitptr tbptr;
	struct text_f *text_info;

	text_info = (struct text_f *)fio->lyr_info;
	fio->lastscc = fio->scc;
	/*
	 * If buffer is empty, or not enough to hold entire EOF marker,
	 * get more data.
	 */
	if (fio->_cnt == 0 || fio->_cnt < text_info->eof_len) {
		/*
		 * If num of bits not enough to hold EOF, move remainder
		 * to base of buffer and read in at base+remainder.  Adjust
		 * pointers and counts accordingly.
		 */
		left = 0;
		if (fio->_cnt > 0) {
			bitptr tptr;

			left = fio->_cnt;
			/*
			 * Move tail of data to the first word of the
			 * buffer (right justified).
			 */
			GET_BITS(tword, fio->_ptr, left);
			SET_BPTR(tptr, fio->_base);
			PUT_BITS(tptr, tword, left);
			SET_BPTR(fio->_ptr, INC_BPTR(fio->_base, left));
		}
		else
			fio->_ptr = fio->_base;	/* reset _ptr */

		zero = 0;
		READBLK(ret, fio, (size_t)((uint64)fio->maxblksize >> 3),
			stat, PARTIAL, &zero);
		/*
		 * Add back in the 'extra' data
		 */
		fio->_ptr = fio->_base;	/* reset _ptr */
		fio->_cnt = fio->_cnt + left;
		if (ret < (ssize_t)0)
			return(ERR);
		if (zero != 0)
			ERETURN(stat, FDC_ERR_UBC, 0);
		if (fio->_cnt == 0)	/* must be at EOD */
		{
			return(setend(fio, stat));
		}
	}
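/*
 * Condensed sketch of how a caller interprets the return codes documented
 * above; the real reader loop is _txt_read() further below.  The helper
 * name is hypothetical.
 */
#if 0	/* illustrative only; excluded from compilation */
static int
get_segment_caller_sketch(struct fdinfo *fio, struct ffsw *stat)
{
	int	ret;

	ret = get_segment(fio, stat);
	if (ret > 0)		/* 1 == EOF, 2 == EOD; stat already set */
		return(0);
	if (ret < 0)		/* ERR; stat already set */
		return(ERR);
	/* ret == 0: a segment (or part of one) is in the buffer and
	 * fio->scc / fio->segbits describe it; stat is untouched. */
	return(0);
}
#endif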
int
_cdc_open(
	const char	*name,
	int		flags,
	int		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	int nextfio = 0;
	int ll_blocked;
	char *ptr;
	union spec_u *nspec;
	int recsize, blksize;
	struct ffsw *dumstat;
	struct cdc_f *cdc_info;

	recsize = 0;		/* this is ignored */
	/*
	 * Blocksize is 512 60-bit words, or 5120 6-bit characters
	 */
	blksize = 5120*6;	/* other block sizes not allowed */

	/*
	 * Internally, both blksize and recsize are in bits!
	 */
	switch(spec->fld.recfmt) {
	case TR_CDC_CZ:
		fio->maxrecsize = recsize;
		break;
	case TR_CDC_CS:
	case TR_CDC_IW:
	case TR_CDC_CW:
		fio->maxrecsize = -1;
		break;
	}
	fio->maxblksize = blksize;
	/*
	 * Allocate buffer:
	 *	block size plus possible 48 bit block terminator plus one
	 *	60-bit word plus 16 slop bytes.
	 */
	fio->_ffbufsiz = blksize + 48 + 64 + 64 + 7; /* bufsiz in bits + fudge */
	ptr = malloc((fio->_ffbufsiz >> 3) + 16);
	if (ptr == NULL) goto nomem;
	/*
	 * Allocate private storage area
	 */
	cdc_info = (struct cdc_f *)calloc(sizeof(struct cdc_f), 1);
	if (cdc_info == NULL) goto nomem;

	fio->lyr_info = (char *)cdc_info;
	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * Now, open the lower layers...
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks, NULL,
		oinf);
	if (nextfio < 0) goto badret;

	fio->fioptr = (struct fdinfo *)nextfio;
	XRCALL(fio->fioptr, fcntlrtn) fio->fioptr, FC_GETINFO,
		&cdc_info->ffci, &dumstat);
	ll_blocked = cdc_info->ffci.ffc_flags & FFC_REC;

	switch(fio->subtype) {
	case TR_CDC_BT_DISK:
		break;			/* either record or stream is OK */
	case TR_CDC_BT_SI:
	case TR_CDC_BT_I:
		if (ll_blocked == 0)	/* if not blocked */
		{
			_SETERROR(stat, FDC_ERR_NOBDRY, 0);
			goto badret;
		}
		break;
	}

	DUMP_IOB(fio);			/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (nextfio > 0)
		XRCALL(fio->fioptr, closertn) fio->fioptr, &dumstat);
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(ERR);
}
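/*
 * Arithmetic sketch only: _ffbufsiz is kept in bits and converted to
 * bytes at allocation time.  The numbers below are the CDC case from
 * _cdc_open() above; the helper name is hypothetical.
 */
#if 0	/* illustrative only; excluded from compilation */
static char *
cdc_buffer_alloc_sketch(void)
{
	long	blkbits  = 5120 * 6;			/* 512 60-bit words */
	long	bufbits  = blkbits + 48 + 64 + 64 + 7;	/* terminator + fudge */
	size_t	bufbytes = (bufbits >> 3) + 16;		/* bits -> bytes + slop */

	return(malloc(bufbytes));
}
#endif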
struct cch_buf *
_cch_getblk(
	struct cch_f	*cch_info,	/* cch_f structure for the file */
	struct fdinfo	*llfio,		/* ffio file descriptor for underlying
					 * layer */
	off_t		fileaddr,	/* bit offset within the file of the
					 * buffer.  This number must be a
					 * multiple of the buffer size. */
	int64		*nblkp,		/* on input, the number of contiguous
					 * buffer blocks sought.  On output,
					 * assigned the actual number of
					 * contiguous buffer blocks assigned. */
	int		rd,		/* 0 if all of the new blocks may be
					 * assigned without reading the file
					 * page.  != 0 if the pages must be
					 * read. */
	int		valid,		/* 0 if the CCH_VALIDBUFFER bit should
					 * not be set in the new blocks */
	struct ffsw	*stat		/* pointer to status return word */
)
{
	int i, nbu, ret;
	int bs;
	int lru_id;		/* buffer number of least recently
				 * used buffer. */
	int limit;
	int64 nblk;
	off_t endaddr, firstpaddr, faddr;
	long *wptr;
	long lru_tm;
	struct cch_buf *cubuf;
	struct cch_buf *cbufs;
	struct cch_buf *fb;

	CCH_DEBUG(("_cch_getblk EN: to bit offset %d\n",fileaddr));

	nbu = cch_info->nbufs;
	cbufs = cch_info->bufs;
	bs = cch_info->bsize;
	nblk = *nblkp;
	if (nblk > 1) {
		/*
		 * Find the first page in the consecutive list of pages which
		 * is buffer-resident.
		 */
		endaddr = fileaddr + nblk * bs;
		firstpaddr = endaddr;
		for (i=0; i<nbu; i++) {
			off_t x;

			cubuf = &cbufs[i];
			x = cubuf->filead;
			if (fileaddr <= x && x < firstpaddr)
				firstpaddr = x;
		}
		if (firstpaddr < endaddr)	/* a page is buffer resident */
			nblk = *nblkp = (firstpaddr - fileaddr) / bs;
		if (nblk <= 0)
			return((struct cch_buf *)NULL);	/* shouldn't happen! */
	}

	/*
	 * Find the least-recently accessed sequence of *nblkp contiguous
	 * buffers.  Free buffers are counted as if their last access time
	 * was 0.  Search the buffers in groups of size nblk to speed this
	 * search and reduce fragmentation of the cache.  When nblk>1, this
	 * algorithm approximates LRU and, most importantly, is deterministic.
	 */
	lru_tm = MAXLONG;	/* min _rtc() value in upcoming loop */
	lru_id = 0;
	for (i=0; i<(nbu-nblk+1); i+=nblk) {
		long last_access = 0;	/* free pages have last_access == 0 */

		if (cbufs[i].filead >= 0)
			last_access = cbufs[i].atime;
		if (last_access < lru_tm) {
			lru_tm = last_access;
			lru_id = i;
		}
	}
	/*
	 * Use the least recently used page buffer or group of page buffers.
	 * Flush any of these page buffers which have the dirty bit set.  When
	 * several adjacent buffers are dirty and correspond to adjacent pages
	 * in the file, they can be flushed with one request.
	 */
	fb = &cbufs[lru_id];
	for (i=0; i<nblk; i++) {
		int contig = 0;	/* number of contiguous dirty buffers */

		faddr = fb[i].filead;
		if (faddr == -1)
			continue;	/* buffer is free */
		while (i+contig < nblk && (fb[i+contig].flags & CCH_DIRTY) &&
			fb[i+contig].filead == faddr) {
			if (fb[i+contig].lastdata || fb[i+contig].firstdata) {
				if (contig == 0)
					contig = 1;
				break;
			}
			contig++;
			faddr += bs;
		}
		if (contig > 0) {
			if (faddr > cch_info->fsize) {
				/* eof is in the last buffer */
				/* clear it if necessary */
				if ((fb[i+contig-1].flags & CCH_ZEROED) == 0) {
					bitptr	toptr;
					off_t	eofaddr;
					int	pgoff;

					eofaddr = CCHFLOOR(cch_info->fsize, bs);
					pgoff = cch_info->fsize - eofaddr;
					SET_BPTR(toptr,
						INC_BPTR(fb[i+contig-1].buf,
						pgoff));
					CCH_MEMCLEAR(toptr, (bs - pgoff));
					fb[i+contig-1].flags |= CCH_ZEROED;
				}
			}
			ret = _cch_wrabuf(cch_info, llfio, &fb[i],
				BITS2BYTES(bs), BITS2BYTES(fb[i].filead),
				contig, &cch_info->feof,
#if defined(__mips) || defined(_LITTLE_ENDIAN)
				's',	/* flush synchronously */
#else
				'a',	/* flush asynchronously */
#endif
				stat);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
			i += contig - 1;
		}
	}

	/*
	 * Wait for any active page buffer I/O, and then requisition the
	 * buffers for the appropriate file pages.
	 */
	for (i=0; i<nblk; i++) {
		if (fb[i].flags & (CCH_WRITING | CCH_READING)) {
			CCHWAITIO(llfio,&fb[i],stat,ret);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
		}
		fb[i].filead = fileaddr + i * bs;
		fb[i].flags = CCH_VALID;
		fb[i].firstdata = fb[i].lastdata = 0;
		if (valid)
			fb[i].flags |= CCH_VALIDBUFFER;
	}

	/*
	 * Now start the synchronous reading of the file page into the buffer.
	 * If all of the pages lie beyond the EOF, then suppress the read.
	 */
	if (rd) {
		if (fileaddr < cch_info->feof) {
			int by_tran;

			fb->sw.sw_flag = 0;	/* indicate I/O in progress */
			ret = _cch_rdabuf(cch_info, llfio, fb, BITS2BYTES(bs),
				BITS2BYTES(fb->filead), nblk, 's',stat);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
			/*
			 * Zero portions of the buffers past the end of file.
			 */
			by_tran = fb->sw.sw_count;
#ifdef CCH_SDS_SUPPORTED
			if (cch_info->optflags & CCHOPT_SDS) {
				int ret;

				ret = _sdsset(
					(BPTR2CP(fb->buf) - (char*)NULL) +
					by_tran, 0,
					nblk * BITS2BYTES(bs) - by_tran);
				if (ret == ERR) {
					_SETERROR(stat, errno, 0);
					return((struct cch_buf *)NULL);
				}
			}
			else
#endif
			{
				if ((nblk*BITS2BYTES(bs)-by_tran) != 0)
					(void)memset(BPTR2CP(fb->buf) + by_tran,
						0,
						nblk * BITS2BYTES(bs) - by_tran);
			}
			for (i=0; i<nblk; i++) {
				fb[i].flags |= CCH_ZEROED;
			}
		}
		else {	/* page lies beyond EOF */
			/*
			 * Zero the entire buffer.
			 */
#ifdef CCH_SDS_SUPPORTED
			if (cch_info->optflags & CCHOPT_SDS) {
				int ret;

				ret = _sdsset(
					(BPTR2CP(fb->buf) - (char*)NULL), 0,
					nblk * BITS2BYTES(bs));
				if (ret == ERR) {
					_SETERROR(stat, errno, 0);
					return((struct cch_buf *)NULL);
				}
				for (i=0; i<nblk; i++) {
					fb[i].flags |= CCH_ZEROED;
				}
			}
			else
#endif
			if (fileaddr < cch_info->fsize) {
				/* this block is between cch_info->feof and */
				/* cch_info->fsize, so we must zero it */
				/* Logic in other parts of this layer will */
				/* only zero what is beyond cch_info->fsize */
#ifdef _CRAY1
				wptr = BPTR2WP(fb->buf);
				limit = (nblk * bs) >> 6; /* convert to words */
				/* this loop vectorizes! */
				for (i=0; i<limit; i++)
					wptr[i] = 0;
#else
				memset(BPTR2CP(fb->buf), 0,
					(nblk * BITS2BYTES(bs)));
#endif
				for (i=0; i<nblk; i++) {
					fb[i].flags |= CCH_ZEROED;
				}
			}
		}
	}
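/*
 * The group-wise LRU selection above, restated in isolation with a
 * hypothetical helper name.  Scanning in strides of nblk keeps the
 * choice deterministic and avoids fragmenting the cache.
 */
#if 0	/* illustrative only; excluded from compilation */
static int
lru_group_sketch(const struct cch_buf *bufs, int nbu, int nblk)
{
	int	i, lru_id = 0;
	long	lru_tm = MAXLONG;

	for (i = 0; i < (nbu - nblk + 1); i += nblk) {
		long last_access = 0;	/* free buffers count as time 0 */

		if (bufs[i].filead >= 0)
			last_access = bufs[i].atime;
		if (last_access < lru_tm) {
			lru_tm = last_access;
			lru_id = i;
		}
	}
	return(lru_id);		/* first buffer of the LRU group */
}
#endif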
int
_sds_open(
	const char	*name,
	int		flags,
	int		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	char *ptr;
	int nextfio;
	int bs, ret;
	long mininc;
	struct fdinfo *nfioptr;
	union spec_u *nspec;
	struct sds_f *sds_info;
	struct ffsw clstat;
	struct stat statbuf;

	/*
	 * Allocate private storage
	 */
	sds_info = (struct sds_f *) calloc(sizeof(struct sds_f),1);
	if (sds_info == NULL) goto nomem;

	fio->lyr_info = (char *)sds_info;
	sds_info->bdfd = -1;		/* also used as a flag */
	sds_info->name = strdup(name);
	if (sds_info->name == NULL) goto badret;

	/*
	 * Internally, both blksize and recsize are in bits!
	 */
	bs = 4096 * 8;			/* one sector buffer */
	fio->_ffbufsiz = bs;		/* bit size of buffer, 1 sector */
	ptr = malloc((bs >> 3)+16);
	if (ptr == NULL) goto nomem;

	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->rwflag = POSITIN;
	sds_info->sdsdirty = 0;
	sds_info->overflowed = NO;
	sds_info->ovoff = 0;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * Open the lower layers
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	if (fio->rtype != TR_FSS_SCR || fio->subtype != FSS_OPT_NOVFL) {
		int write_only = 0;
		int llflags = flags;

		/*
		 * The lower level file must be readable to allow loading
		 * into memory.
		 */
		if ((llflags & O_ACCMODE) == O_WRONLY) {
			write_only = 1;
			llflags &= ~O_ACCMODE;
			llflags |= O_RDWR;
		}
		nextfio = _ffopen(name, llflags, mode, nspec, stat, cbits,
			cblks, NULL, oinf);
		/*
		 * If write_only and file has no read permissions, then it
		 * may be opened O_WRONLY.  But it better be empty, because
		 * if not _sds_load will get an error later.
		 */
		if (nextfio < 0 && write_only) {
			llflags = flags;
			nextfio = _ffopen(name, llflags, mode, nspec, stat,
				cbits, cblks, NULL, oinf);
		}
		if (nextfio < 0) goto badret;

		ret = XRCALL((struct fdinfo *)nextfio, fcntlrtn)
			(struct fdinfo *)nextfio, FC_STAT, &statbuf, stat);
		if (ret < 0) goto close_badret;

		sds_info->dsk_blksize = statbuf.st_blksize;
	}
/*
 * _cca_listio
 *
 * Issue a listio request for the cachea layer.
 *
 * Return Value:
 *
 *	On success, nreq is returned, and the contents of the stat structure
 *	are unspecified.
 *
 *	If an error in setup is encountered, stat is set as follows:
 *
 *		stat->sw_error = error code
 *		stat->sw_stat  = FFERR
 *		stat->sw_flag  = 1
 *		stat->sw_count = 0
 *
 *	If an error in I/O request I is detected, the list[I].li_status
 *	structure will be set as follows:
 *
 *		list[I].li_status->sw_error = error code
 *		list[I].li_status->sw_flag  = 1
 */
int
_cca_listio(
	int		cmd,	/* LC_START or LC_WAIT */
	struct fflistreq *list,	/* list of requests (see fflistio) */
	int		nreq,	/* number of requests */
	struct ffsw	*stat)	/* status structure */
{
	int ret;
	int i;
	int n_handled;
	int status;
	int zero;
	int pos;
	bitptr buf;
	struct ffsw loc_stat;
	struct fdinfo *fio;
	struct fdinfo *oldfio;
	struct cca_f *cca_info;

	n_handled = 0;
	oldfio = GETIOB(list[0].li_fildes);
	cca_info = (struct cca_f *)oldfio->lyr_info;

	for (i = 0; i < nreq; i++) {
		fio = GETIOB(list[i].li_fildes);
		if (fio != oldfio) {
			_SETERROR(list[i].li_status, FDC_ERR_LSTIO, 0);
			continue;
		}
		if ( list[i].li_signo != 0 ) {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
			continue;
		}
		cca_info = (struct cca_f *)fio->lyr_info;
		CLRFFSTAT(*(list[i].li_status));
		SET_BPTR(buf, CPTR2BP(list[i].li_buf));

		if ( list[i].li_nstride > 1 ) {
			status = _ffcompound(&list[i]);
			if (status == 0)
				n_handled++;
			continue;
		}
		if ( list[i].li_flags == LF_LSEEK ) {
			pos = _cca_seek(fio, list[i].li_offset, SEEK_SET,
				&loc_stat);
			if (pos == -1) {
				*list[i].li_status = loc_stat;
				continue;
			}
		}
		else if (list[i].li_flags != 0) {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}
		zero = 0;
		status = 0;
		if ( cmd == LC_START ) {
			if ( list[i].li_opcode == LO_READ ) {
				status = _cca_reada(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero );
			}
			else if (list[i].li_opcode == LO_WRITE ) {
				status = _cca_writea(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero );
			}
			else {
				_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
			}
		}
		else if ( cmd == LC_WAIT ) {
			if ( list[i].li_opcode == LO_READ ) {
				status = _cca_read(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero );
			}
			else if (list[i].li_opcode == LO_WRITE ) {
				status = _cca_write(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero );
			}
			else {
				_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
			}
		}
		else {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}
		if (status == ERR) {
			continue;
		}
		n_handled++;
	}
	return( n_handled );
}
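/*
 * Usage sketch: one synchronous read request for the cachea layer.  The
 * field names are the ones _cca_listio() dereferences above; "ffd" and
 * "buf" are assumed to come from the caller and the helper name is
 * hypothetical.
 */
#if 0	/* illustrative only; excluded from compilation */
static void
cca_listio_usage_sketch(int ffd, char *buf)
{
	struct fflistreq	req;
	struct ffsw		reqstat, stat;
	int			done;

	memset(&req, 0, sizeof(req));
	req.li_fildes	= ffd;		/* FFIO descriptor */
	req.li_buf	= buf;
	req.li_nbyte	= 4096;
	req.li_opcode	= LO_READ;
	req.li_flags	= LF_LSEEK;	/* seek to li_offset before the read */
	req.li_offset	= 0;
	req.li_nstride	= 1;		/* strided requests go to _ffcompound() */
	req.li_signo	= 0;		/* signals are rejected above */
	req.li_status	= &reqstat;

	done = _cca_listio(LC_WAIT, &req, 1, &stat);
	(void)done;			/* 1 if the request was handled */
}
#endif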
/*
 * Write an X class file
 * Parameters:
 *	fio	- Pointer to fdinfo block
 *	bufptr	- bit pointer to the user's data
 *	nbytes	- Number of bytes to be written
 *	stat	- pointer to status return word
 *	fulp	- full or partial write mode flag
 *	ubc	- pointer to unused bit count (not used for IBM)
 */
ssize_t
_gen_xwrite(
	struct fdinfo	*fio,
	bitptr		bufptr,
	size_t		nbytes,
	struct ffsw	*stat,
	int		fulp,
	int		*ubc)
{
	ssize_t ret;
	int64 nbits, bits, moved;
	long left;

	nbits = (uint64)nbytes << 3;
	if (*ubc != 0)
		ERETURN(stat, FDC_ERR_UBC, 0);
	/*
	 * If we've been reading, then try to switch the buffer into write
	 * mode.
	 */
	if (fio->rwflag == READIN) {
		/*
		 * Issue an error if we are not positioned at a record
		 * boundary.  ffweof would terminate the current record, but
		 * _cos_write overwrites the current record.  We need to
		 * decide which is the proper approach before permitting this
		 * here.
		 */
		if (fio->_cnt > 0)
			ERETURN(stat, FDC_ERR_NOTREC, 0);
		ret = gen_xwrard(fio, stat);
		if (ret < 0)
			return(ERR);
	}
	fio->rwflag = WRITIN;
	moved = 0;
	/*
	 * Check for record size exceeded.
	 */
	if ((fio->maxrecsize > 0) && (fio->recbits + nbits) > fio->maxrecsize)
		ERETURN(stat, FDC_ERR_MXREC, 0);
	/*
	 * loop putting data in buffer and building segments
	 */
	while (nbits > 0) {
		/*
		 * bits tells when data has been moved.  Set it to zero
		 * unless someone moves some data in the loop
		 */
		bits = 0;
		/*
		 * initialize a new segment, if needed.
		 */
		if (fio->recbits == 0) {
			if (init_seg(fio, stat) != 0)
				return(ERR);
		}
		/*
		 * If enough room for bits, put them in the buffer
		 */
		left = fio->_ffbufsiz - fio->_cnt;
		if (left >= nbits) {
			bits = nbits;
			PUTDATA(bufptr, fio, bits);
			SET_BPTR(bufptr, INC_BPTR(bufptr, bits));
		}
		else {
			/*
			 * There is not enough room to put all of the data.
			 */
			if (left == 0) {
				ret = put_segment(fio, stat, PARTIAL);
				if (ret != 0)
					return(ERR);
			}
			else {
				bits = nbits;
				if (nbits > left)
					bits = left;
				PUTDATA(bufptr, fio, bits);
				SET_BPTR(bufptr, INC_BPTR(bufptr, bits));
			}
		}
		nbits -= bits;
		moved += bits;
	}
	fio->recbits += moved;
	if (fulp == FULL) {
		/*
		 * Watch out for NULL writes!
		 */
		if (fio->recbits == 0)
			if (init_seg(fio, stat) != 0)
				return(ERR);
		ret = put_segment(fio, stat, fulp);	/* this will be FULL */
		if (ret != 0)
			return(ERR);
		SETSTAT(stat, FFEOR, (uint64)moved >> 3);
		fio->last_recbits = fio->recbits;
		fio->recbits = 0;
	}
	else
int
_sqb_pos(struct fdinfo *fio, int cmd, long *arg, int len, struct ffsw *stat)
{
	int ret = 0;
	struct sqb_f *sqb_info;
	struct sqbio *sqbptr;
	struct sqbio *sqborig;
	struct sqbio *s;
	struct fdinfo *llfio;
	int found = 0;
	int nbits;
	int sync = -1;

	llfio = fio->fioptr;
	sqb_info = (struct sqb_f *)fio->lyr_info;

	if (fio->rwflag == WRITIN) {
		/* flush buffers and wait for outstanding I/O to finish. */
		if (_sqb_flush(fio, stat) < 0) {
			return(ERR);
		}
	}
	switch(cmd) {
	/* For now, this is not supported on SGI systems. */
	/* We need to work out what "arg" should be. */
#if !defined(__mips) && !defined(_LITTLE_ENDIAN)
	case FP_RSEEK:
		if ((fio->rwflag == READIN) || (fio->rwflag == POSITIN)) {
			if (*arg < 0) {
				/* Seeking backwards */
				/* Are we seeking within the current */
				/* buffer? */
				sqbptr = sqb_info->sqbio_cur;
				if (sqbptr->status == IOACTIVE) {
					while (sqbptr->iostat.sw_flag == 0 ||
						sqbptr->iostat.sw_stat == 0) {
						ret = XRCALL(llfio,fcntlrtn)
							llfio, FC_RECALL,
							&(sqbptr->iostat),
							stat);
						if (ret < 0) {
							return(ERR);
						}
					}
					if (FFSTAT(sqbptr->iostat) == FFERR) {
						ERETURN(stat,
						   sqbptr->iostat.sw_error,0);
					}
					sqbptr->_cnt =
						sqbptr->iostat.sw_count<<3;
					sqbptr->status = IODATA;
				}
				if (sqbptr->status == IODATA) {
					nbits = -(*arg); /* convert to positive */
					nbits = nbits<<3;
					if (nbits <= SUBT_BPTR(sqb_info->_ptr,
						sqbptr->_base)) {
						SET_BPTR(sqb_info->_ptr,
						   INC_BPTR(sqb_info->_ptr,
						   -nbits));
						sqbptr->_cnt += nbits;
						break;
					}
				}
			}
			else {
				/* seeking forward */
				/* Any chance that the position would be in */
				/* our buffers? */
				nbits = *arg << 3;
				if (nbits > sqb_info->nbuf * sqb_info->bufsiz) {
					/* won't be in any of the buffers */
					goto a1;
				}
				sqbptr = sqb_info->sqbio_cur;
				sqborig = sqbptr;
				do {
					if (sqbptr->status == IOACTIVE) {
						while (sqbptr->iostat.sw_flag == 0 ||
							sqbptr->iostat.sw_stat == 0) {
							ret = XRCALL(llfio,fcntlrtn)
								llfio, FC_RECALL,
								&(sqbptr->iostat),
								stat);
							if (ret < 0) {
								return(ERR);
							}
						}
						if (FFSTAT(sqbptr->iostat) == FFERR) {
							ERETURN(stat,
							   sqbptr->iostat.sw_error,0);
						}
						sqbptr->_cnt =
							sqbptr->iostat.sw_count<<3;
						sqbptr->status = IODATA;
					}
					if (sqbptr->status == IODATA) {
						if (nbits <= sqbptr->_cnt) {
							/* Desired position is in this buffer */
							sqbptr->_cnt -= nbits;
							/* Clear out buffers that preceded this */
							s = sqborig;
							for (; s != sqbptr; s= s->nxt) {
								s->status = EMPTY;
								CLRFFSTAT(s->iostat);
							}
							sqb_info->sqbio_cur = sqbptr;
							if (sqbptr != sqborig)
								sqb_info->_ptr = sqbptr->_base;
							SET_BPTR(sqb_info->_ptr,
							   INC_BPTR(sqb_info->_ptr,nbits));
							found = 1;
							break;
						}
						else {
							nbits -= sqbptr->_cnt;
						}
					}
					else
						goto a1; /* all out of data */
					sqbptr = sqbptr->nxt;
				} while (sqbptr != sqborig);
			}
		}
/*
 *
 * Description:
 *	writes nbytes bytes, with *ubc unused bits, from bufptr to
 *	the next lower layer.
 * Parameters:
 *	fio	- Pointer to fdinfo block
 *	bufptr	- bit pointer to user's data
 *	nbytes	- Number of bytes to be written
 *	stat	- pointer to status return word
 *	fulp	- full or partial write mode flag
 *	ubc	- pointer to unused bit count
 * Returns:
 *	number of bytes written
 *	-1 if error
 */
ssize_t
_sqb_write(
	struct fdinfo	*fio,
	bitptr		bufptr,
	size_t		nbytes,
	struct ffsw	*stat,
	int		fulp,
	int		*ubc)
{
	int ret;
	int bs, btomove;
	uint64 nbits;
	ssize_t moved;
	struct sqb_f *sqb_info;
	struct fdinfo *llfio;
	struct ffsw locstat;
	struct sqbio *sqbptr;
	int zero = 0;

	nbits = ((uint64)nbytes << 3) - *ubc;
	sqb_info = (struct sqb_f *)fio->lyr_info;
	llfio = fio->fioptr;
	moved = 0;

	if (fio->rwflag == READIN || fio->rwflag == POSITIN) {
		/* synchronize physical position with logical position */
		if (_sqb_sync(fio, &locstat, 1) < 0) {
			goto erret;
		}
	}
	fio->rwflag = WRITIN;
	bs = sqb_info->bufsiz>>3;
	sqbptr = sqb_info->sqbio_cur;
	while (nbits != 0) {
		if (sqbptr->status == IOACTIVE) {
			/* wait for the outstanding asynch i/o to complete */
			while (sqbptr->iostat.sw_flag == 0 ||
				sqbptr->iostat.sw_stat == 0) {
				ret = XRCALL(llfio,fcntlrtn) llfio, FC_RECALL,
					&(sqbptr->iostat), &locstat);
				if (ret < 0) {
					goto erret;
				}
			}
			if (sqbptr->iostat.sw_error != 0) {
				ERETURN(stat, sqbptr->iostat.sw_error, 0);
			}
			if (sqbptr->iostat.sw_count != sqbptr->_iowritten) {
				ERETURN(stat, FDC_ERR_WRTERR, 0);
			}
			sqbptr->status = EMPTY;
			sqbptr->_cnt = sqb_info->bufsiz;
			CLRFFSTAT(sqbptr->iostat);
		}
		if (sqbptr->status == EMPTY) {
			sqbptr->_cnt = sqb_info->bufsiz;
		}
		/*
		 * Move data from user to buffer
		 */
		btomove = MIN(nbits, sqbptr->_cnt);
		MOV_BITS(sqb_info->_ptr, bufptr, btomove);
		SET_BPTR(bufptr, INC_BPTR(bufptr, btomove));
		nbits -= btomove;
		sqbptr->_cnt -= btomove;
		sqbptr->status = IODATA;
		if (sqbptr->_cnt == 0) {
			/* no room left in this buffer; start I/O on it */
			CLRFFSTAT(sqbptr->iostat);
			sqbptr->_iowritten = bs;
			if( XRCALL(llfio, writeartn) llfio, sqbptr->_base,
				(size_t) bs, &(sqbptr->iostat), FULL,
				&zero) < 0) {
				ERETURN(stat, sqbptr->iostat.sw_error,
					(moved +7) >> 3);
			}
			sqbptr->status = IOACTIVE;
			sqb_info->sqbio_cur = sqb_info->sqbio_cur->nxt;
			sqbptr = sqb_info->sqbio_cur;
			sqb_info->_ptr = sqb_info->sqbio_cur->_base;
		}
		else {
/*
 * _any_sds_fr_mem moves data into a secondary data segment (SDS) from
 * user memory.
 *
 * Unlike _sds_fr_mem, _any_sds_fr_mem handles moving a number of bits that
 * may not be a multiple of 512.
 *
 * Returns 0 on normal return, or else -1 with error code in errno.
 */
int
_any_sds_fr_mem(
	bitptr	sdsaddr,	/* SDS bit address of data */
	bitptr	ubuf,		/* user buffer containing the data */
	int	nbits		/* number of bits to move */
)
{
	int sds_bit_offset;
	int sds_bit_offset_blk;
	int rbits;
	char localbuf[BYTPBLOCK];
	bitptr locptr;
	long *uwaddr;
	char *ucaddr;

	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	if (sds_bit_offset & (BITPBLOCK -1)) {
		/* The sds address is not on a block boundary. */
		/* Read data from sds to a local buffer.  Copy the */
		/* user's memory to the appropriate part of the local */
		/* buffer, and write it back out to sds. */
		sds_bit_offset_blk = (sds_bit_offset & ~(BITPBLOCK - 1));
		if (ssread((int *)localbuf, BITS2BLOCKS(sds_bit_offset_blk),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		rbits = MIN(nbits, BITPBLOCK -
			(sds_bit_offset - sds_bit_offset_blk));
		locptr = CPTR2BP(localbuf);
		SET_BPTR(locptr, INC_BPTR(locptr,
			sds_bit_offset - sds_bit_offset_blk));
		MOV_BITS(locptr, ubuf, rbits);
		SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
		nbits -= rbits;
		if(sswrite((int *)localbuf, BITS2BLOCKS(sds_bit_offset_blk),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
		if (nbits == 0)
			return(0);
		assert(((SUBT_BPTR(sdsaddr, WPTR2BP(0))) &
			(BITPBLOCK -1)) == 0);
	}
	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	uwaddr = BPTR2WP(ubuf);
	ucaddr = BPTR2CP(ubuf);
	if ((nbits & (BITPBLOCK-1)) || (ucaddr != (char *)uwaddr)) {
		int left;

		locptr = CPTR2BP(localbuf);
		/* round down nbits to a block boundary */
		rbits = nbits & ~(BITPBLOCK-1);
		if (rbits) {
			if (ucaddr != (char*)uwaddr) {
				/* ubuf is not word aligned. */
				left = rbits;
				sds_bit_offset_blk =
					BITS2BLOCKS(sds_bit_offset);
				while (left > 0) {
					if( ssread((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					MOV_BITS(locptr, ubuf, BITPBLOCK);
					SET_BPTR(ubuf, INC_BPTR(ubuf,
						BITPBLOCK));
					if( sswrite((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					SET_BPTR(sdsaddr, INC_BPTR(sdsaddr,
						BITPBLOCK));
					sds_bit_offset_blk++;
					left -= BITPBLOCK;
				}
			}
			else {
				if (_sds_fr_mem(sdsaddr, ubuf, rbits) == -1) {
					return(-1);
				}
				SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
				SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
			}
			sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
		}
		/* Get last block into local memory.  Merge in user's memory */
		/* and write it back out to sds. */
		if( ssread((int *)localbuf, BITS2BLOCKS(sds_bit_offset),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		MOV_BITS(locptr, ubuf, nbits - rbits);
		if( sswrite((int *)localbuf, BITS2BLOCKS(sds_bit_offset),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	else {
		if(sswrite(uwaddr, BITS2BLOCKS(sds_bit_offset),
			BITS2BLOCKS(nbits)) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	return(0);
}
/*
 * _any_mem_fr_sds moves data into user memory from a secondary data
 * segment (SDS).
 *
 * Unlike _mem_fr_sds, _any_mem_fr_sds handles moving a number of bits that
 * may not be a multiple of 512.
 *
 * Returns 0 on normal return, or else -1 with error code in errno.
 */
int
_any_mem_fr_sds(
	bitptr	ubuf,		/* user buffer to receive data */
	bitptr	sdsaddr,	/* SDS bit address of data */
	int	nbits		/* number of bits to move */
)
{
	int sds_bit_offset;
	int sds_bit_offset_blk;
	int rbits;
	char localbuf[BYTPBLOCK];
	bitptr locptr;
	long *uwaddr;
	char *ucaddr;

	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	if (sds_bit_offset & (BITPBLOCK -1)) {
		/* The sds address is not on a block boundary. */
		/* Read data from sds to a local buffer.  Copy the */
		/* appropriate part of the local buffer to user's memory. */
		sds_bit_offset_blk = (sds_bit_offset & ~(BITPBLOCK - 1));
		if(ssread((int *)localbuf, BITS2BLOCKS(sds_bit_offset_blk),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		rbits = MIN(nbits, BITPBLOCK -
			(sds_bit_offset - sds_bit_offset_blk));
		locptr = CPTR2BP(localbuf);
		SET_BPTR(locptr, INC_BPTR(locptr,
			sds_bit_offset - sds_bit_offset_blk));
		MOV_BITS(ubuf, locptr, rbits);
		SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
		nbits -= rbits;
		SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
		if (nbits == 0)
			return(0);
		/* Verify that our sds address is now on a block boundary */
		assert (((SUBT_BPTR(sdsaddr, WPTR2BP(0))) &
			(BITPBLOCK -1)) == 0);
	}
	sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
	uwaddr = BPTR2WP(ubuf);
	ucaddr = BPTR2CP(ubuf);
	if ((nbits & (BITPBLOCK-1)) || (ucaddr != (char *)uwaddr)) {
		int left;

		/* Either we are not reading in a multiple of blocks or */
		/* the user's address is not word-aligned. */
		/* Round nbits down to a block boundary and */
		/* move those to user's memory. */
		locptr = CPTR2BP(localbuf);
		rbits = nbits & ~(BITPBLOCK-1);
		if (rbits) {
			if (ucaddr != (char*)uwaddr) {
				/* ubuf is not word aligned. */
				/* Read the data from sds into a local */
				/* buffer and copy to the user's memory */
				left = rbits;
				sds_bit_offset_blk =
					BITS2BLOCKS(sds_bit_offset);
				while (left > 0) {
					if (ssread((int *)localbuf,
						sds_bit_offset_blk, 1) == -1) {
						errno = FDC_ERR_SDSIO;
						return(-1);
					}
					MOV_BITS(ubuf, locptr, BITPBLOCK);
					SET_BPTR(ubuf, INC_BPTR(ubuf,
						BITPBLOCK));
					SET_BPTR(sdsaddr, INC_BPTR(sdsaddr,
						BITPBLOCK));
					sds_bit_offset_blk++;
					left -= BITPBLOCK;
				}
			}
			else {
				if (ssread(uwaddr, BITS2BLOCKS(sds_bit_offset),
					BITS2BLOCKS(rbits)) == -1) {
					errno = FDC_ERR_SDSIO;
					return(-1);
				}
				SET_BPTR(ubuf, INC_BPTR(ubuf, rbits));
				SET_BPTR(sdsaddr, INC_BPTR(sdsaddr, rbits));
			}
			sds_bit_offset = SUBT_BPTR(sdsaddr, WPTR2BP(0));
		}
		/* get last block into local memory and */
		/* transfer to user's memory */
		if (ssread((int *)localbuf, BITS2BLOCKS(sds_bit_offset),
			1) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
		assert((nbits - rbits) < BITPBLOCK);
		MOV_BITS(ubuf, locptr, nbits - rbits);
	}
	else {
		if(ssread(uwaddr, BITS2BLOCKS(sds_bit_offset),
			BITS2BLOCKS(nbits)) == -1) {
			errno = FDC_ERR_SDSIO;
			return(-1);
		}
	}
	return(0);
}
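/*
 * Sketch of the head/body/tail decomposition both routines above perform:
 * a partial leading block staged through localbuf, zero or more whole
 * blocks, then a partial trailing block.  The helper name is hypothetical.
 */
#if 0	/* illustrative only; excluded from compilation */
static void
sds_split_sketch(int sds_bit_offset, int nbits)
{
	int	head = 0, body, tail;

	if (sds_bit_offset & (BITPBLOCK - 1))	/* unaligned start */
		head = MIN(nbits,
			BITPBLOCK - (sds_bit_offset & (BITPBLOCK - 1)));
	body = (nbits - head) & ~(BITPBLOCK - 1);	/* whole blocks */
	tail = (nbits - head) - body;			/* trailing partial block */
	(void)tail;
}
#endif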
_ffopen_t
_gen_xopen(
	const char	*name,
	int		flags,
	mode_t		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf
)
{
	char *ptr;
	union spec_u *nspec;
	int blksize;
	_ffopen_t nextfio;
	int isvalid;
	struct gen_xf *xf_info;

	/*
	 * Allocate private storage
	 */
	xf_info = (struct gen_xf *)calloc(sizeof(struct gen_xf),1);
	if (xf_info == NULL) goto nomem;

	fio->lyr_info = (char *)xf_info;
	/*
	 * select parameters based on record type
	 */
	switch(fio->rtype) {
	case TR_NVE_V:
		xf_info->rdwlen = 112;	/* bits */
		break;
	case TR_CRAY_V:
		xf_info->rdwlen = 64;	/* bits */
		break;
#ifdef _OLD_F77
	case TR_UX_VAX:
	case TR_UX_SUN:
		xf_info->rdwlen = 32;	/* bits */
		break;
#endif
	case TR_205_W:
		xf_info->rdwlen = 64;	/* bits */
		break;
	}
	xf_info->last_lrdwaddr = 0;
	xf_info->lrdwaddr = 0;

	/*
	 * Record the maximum record size in bits.
	 * A value of 0 is stored if this is unspecified.
	 */
	fio->maxrecsize = _ff_nparm_getv(spec, 1, &isvalid) * 8;

	/*
	 * Record the buffer size in bits.
	 */
	blksize = _ff_nparm_getv(spec, 2, &isvalid) * 8;
	if (! isvalid || blksize < 256)		/* bits, mighty small! */
		blksize = X_BUFSIZ * BITPBLOCK;
	else
		blksize = (blksize + 077) & (~077); /* round to word size */
	/*
	 * Although the _ffbufsiz field is declared as long,
	 * these routines use GETDATA and PUTDATA.  Those macros
	 * assign the amount to be written to integers.  So, to
	 * make this all work we need to be sure that the buffer size
	 * does not exceed the size of an integer.
	 */
	if (blksize > (1<<sizeof(int)*8-5)) {
		_SETERROR(stat, FDC_ERR_BUFSIZ, 0);
		goto badret;
	}
	fio->_ffbufsiz = blksize;		/* bit size of buffer */
	ptr = malloc((blksize >> 3) + 16);
	if (ptr == NULL) goto nomem;

	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->scc = SCCFULL;
	fio->lastscc = SCCFULL;
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * Now, open the lower layers
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks, NULL,
		oinf);
	if (nextfio < 0) goto badret;

	DUMP_IOB(fio);				/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(_FFOPEN_ERR);
}
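/*
 * Hedged restatement of the two size adjustments above (hypothetical helper
 * name): 077 is octal 63, so the AND rounds the bit count up to a whole
 * 64-bit word, and _gen_xopen() separately rejects sizes larger than
 * 1 << (sizeof(int)*8 - 5) so the int arithmetic in GETDATA/PUTDATA is safe.
 */
#if 0	/* illustrative only; excluded from compilation */
static long
xf_round_blksize_sketch(long blksize)
{
	/* round up to a multiple of 64 bits: (1000 + 077) & ~077 == 1024 */
	blksize = (blksize + 077) & (~077);

	/* a caller such as _gen_xopen() must still verify that
	 * blksize <= (1 << (sizeof(int)*8 - 5)) before using it */
	return(blksize);
}
#endif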
/*
 * Read TEXT records
 *
 * Records and blocks are supported only in multiples of 8 bits.
 * There is no logical limit for the maximum record size.  The
 * maximum block size is determined arbitrarily to be 512 words.
 *
 * Parameters:
 *	fio	- Pointer to fdinfo block
 *	bufptr	- bit pointer to where data is to go.
 *	nbytes	- number of bytes to be read
 *	stat	- pointer to status return word
 *	fulp	- full or partial read mode flag
 *	ubc	- pointer to unused bit count (not used)
 */
ssize_t
_txt_read(struct fdinfo *fio, bitptr bufptr, size_t nbytes,
	struct ffsw *stat, int fulp, int *ubc)
{
	int64 nbits, bits, movdbits;
	int ret, eorstat;

	nbits = (uint64)nbytes << 3;	/* convert bytes to bits */
	movdbits = 0;
	if (*ubc != 0)			/* ubc should always be zero */
		ERETURN(stat, FDC_ERR_UBC, 0);

	/* read after write error */
	if (fio->rwflag == WRITIN)
		ERETURN(stat, FDC_ERR_RAWR, 0);

	fio->rwflag = READIN;		/* set operation type */
	/*
	 * If segment is empty, get the next segment.
	 */
	fio->ateor = 0;
	if (fio->segbits == 0) {
		ret = get_segment(fio, stat);
		/* if EOF or EOD found */
		if (ret > 0)
			return(0);
		if (ret < 0)
			return(ret);	/* stat set by get_segment */
	}
	/*
	 * Loop until one of the following occurs:
	 *	- the caller's request of nbits is satisfied
	 *	- an EOR is found
	 *	- an EOF is found
	 *	- an EOD is found
	 */
	eorstat = FFCNT;
	while ( nbits > 0 )	/* while caller is not satisfied */
	{
		/*
		 * If more bits are requested than are in the segment, return
		 * the segment.  If the scc (segment control code from
		 * get_segment()) equals SCCFULL then return (i.e., hit
		 * end-of-record (EOR)).  If the scc equals SCCMIDL (i.e.,
		 * the fio buffer is empty and no EOR was hit) subtract the
		 * number of bits moved from nbits and go on.
		 */
		bits = nbits;
		if (fio->segbits < nbits)
			bits = fio->segbits;
		GETDATA(bufptr, fio, bits);
		movdbits += bits;
		SET_BPTR(bufptr, INC_BPTR(bufptr, bits));
		nbits -= bits;
		if (fio->segbits == 0) {
			if (fio->scc == SCCFULL) {
				nbits = 0;	/* return anyway */
				eorstat = FFEOR;
			}
			else {
				ret = get_segment(fio, stat);
				/* if EOF or EOD found */
				if (ret > 0)
					return(0);
				if (ret < 0)
					return(ret); /* stat set by get_segment */
			}
		}
	} /* end while */
	/*
	 * Set status now, before doing any skip to EOR.
	 * Must check EOR status again...
	 */
	if ((fio->segbits == 0) && (fio->scc == SCCFULL))
		eorstat = FFEOR;

	fio->recbits += movdbits;
	/*
	 * If the mode is FULL and more bits are
	 * available in the current record, -or- if
	 * the number of bits requested just happened to
	 * be the number of bits in the record, skip to the next EOR.
	 * If EOF/EOD found while skipping, set the status (this is an error)
	 * and return.
	 */
	if ((fulp == FULL) || (eorstat == FFEOR)) {
		ret = skip2eor(fio, stat);
		if (ret > 0)
			return(0);	/* EOF/EOD */
		if (ret < 0)
			return(ERR);	/* Status should be set */
		fio->last_recbits = fio->recbits;
		fio->recbits = 0;
	}
	SETSTAT(stat, eorstat, (uint64)movdbits >> 3);	/* assume CNT */
	return ((uint64)movdbits >> 3);
} /* end of _txt_read */
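/*
 * Usage sketch: how a caller can tell a complete record from a partial one
 * after the read above.  The choice of PARTIAL mode and the helper name are
 * assumptions of this example; FFSTAT() extracts the completion code stored
 * by SETSTAT().
 */
#if 0	/* illustrative only; excluded from compilation */
static void
txt_read_usage_sketch(struct fdinfo *fio, bitptr bufptr, size_t nbytes,
	struct ffsw *stat)
{
	int	zero = 0;
	ssize_t	nread;

	nread = _txt_read(fio, bufptr, nbytes, stat, PARTIAL, &zero);
	if (nread >= 0) {
		if (FFSTAT(*stat) == FFEOR)
			;	/* a complete record (EOR consumed) */
		else
			;	/* FFCNT: the record continues past nbytes */
	}
}
#endif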
_ffopen_t
_gen_fopen(
	const char	*name,
	int		flags,
	mode_t		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	char *ptr;
	union spec_u *nspec;
	long recsize, blksize;	/* bits */
	long rsz, mbs;		/* bytes */
	_ffopen_t nextfio;
	int rtype;
	struct gen_ff *ff_dat;

	/*
	 * convert 8-bit bytes to bits
	 */
	rsz = spec->fld.recsize;
	mbs = spec->fld.mbs;
	rtype = spec->fld.recfmt;

	if (rtype < 0 || rtype >= NUM_F_TYPES) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		return(_FFOPEN_ERR);
	}
	/*
	 * General limit checks from table.
	 */
	if (rsz == 0) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}
	if (rsz < _F_limits[rtype].min_rsz ||
	    rsz > _F_limits[rtype].max_rsz) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}
	if (mbs != 0)
		if (mbs < _F_limits[rtype].min_mbs ||
		    mbs > _F_limits[rtype].max_mbs) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}

	switch(rtype) {
	case TR_IBM_F:
		/*
		 * if mbs and rsz specified with
		 * F format and mbs != rsz then error
		 */
		if (mbs != rsz && mbs != 0) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		/* FALLTHROUGH: F shares the remaining checks with FB */
	case TR_IBM_FB:
		if (mbs == 0)
			mbs = rsz;	/* dflt mbs = rsz */
		/* must be exact multiple */
		if ((mbs % rsz) != 0) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		break;
	case TR_VMS_F_DSK:
	case TR_VMS_F_TP:
	case TR_VMS_F_TR:
		if (mbs == 0)		/* unspecified */
		{
			if (rtype != TR_VMS_F_TP)	/* deflt mbs=rsz */
				mbs = rsz;
			else if(rtype == TR_VMS_F_TP) {	/* dflt mbs=2048 */
				mbs = 2048;
				if (rsz > mbs)
					mbs = rsz;
			}
		}
		if (rsz > mbs) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		break;
	default:
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}

	recsize = rsz << 3;
	blksize = mbs << 3;
	/*
	 * Internally, both blksize and recsize are in bits!
	 */
	fio->maxrecsize = recsize;
	fio->maxblksize = blksize;
	fio->_ffbufsiz = blksize;	/* bit size of buffer */
	/*
	 * Allocate buffer
	 */
	ptr = malloc((blksize >> 3) + 16);
	if (ptr == NULL) goto nomem;
	/*
	 * Allocate private data area
	 */
	fio->lyr_info = (char *)calloc(sizeof(struct gen_ff), 1);
	if (fio->lyr_info == NULL) goto nomem;

	/* load up record characteristics */
	ff_dat = (struct gen_ff *)fio->lyr_info;
	*ff_dat = _Frec_def_tab[rtype];

	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * First, open the lower layers
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks, NULL,
		oinf);
	if (nextfio == _FFOPEN_ERR) goto badret;

	DUMP_IOB(fio);			/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(_FFOPEN_ERR);
}
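/*
 * The F/FB record-length rules enforced above, restated as a predicate
 * (hypothetical helper; rsz and mbs in bytes).  The VMS variants have
 * their own checks in the switch and are not repeated here.
 */
#if 0	/* illustrative only; excluded from compilation */
static int
f_spec_ok_sketch(int rtype, long rsz, long mbs)
{
	if (rsz == 0)
		return(0);
	if (rtype == TR_IBM_F)		/* F: mbs must equal rsz (or be 0) */
		return(mbs == 0 || mbs == rsz);
	if (rtype == TR_IBM_FB)		/* FB: mbs must be a multiple of rsz */
		return(mbs == 0 || (mbs % rsz) == 0);
	return(1);
}
#endif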
/*
 * _cch_write
 *
 * Process write requests for the cache layer.
 *
 * Return value:
 *
 *	The number of bytes transferred is returned upon successful
 *	completion.  If an error occurs, -1 is returned.
 *
 *	The stat->sw_stat field is set to FFCNT upon normal return.
 */
ssize_t
_cch_write(
	struct fdinfo	*fio,	/* ffio file descriptor. */
	bitptr		datptr,	/* bit pointer to the user's data. */
	size_t		nbytes,	/* Number of bytes to be written. */
	struct ffsw	*stat,	/* pointer to status return word */
	int		fulp,	/* full or partial write mode flag */
	int		*ubcp	/* pointer to unused bit count.  On return,
				 * *ubcp is updated to contain the unused bit
				 * count in the data returned. */
)
{
	off_t cpos;		/* bit position in file */
	int64 moved;		/* number of bits transferred */
	int64 bytes_moved;	/* number of bytes transferred */
	int64 morebits;		/* bits moved in current iteration */
	int64 numblocks;	/* num of pages to process this iter */
	int pgoff;
	off_t fileaddr;
	off_t eofaddr;
	int gb_rd;		/* nonzero if pages must be read */
	int valid;		/* nonzero if CCH_VALIDBUFFER should
				 * be set */
	int64 nbits;
	int64 i;
	int bs, nbu;
	off_t olpos, endpos, endoff;
	bitptr toptr;
	struct ffsw locstat;
	struct fdinfo *llfio;
	struct cch_f *cch_info;
	struct cch_buf *cubuf;
	int err;
	short firsteof = 0;
	short setfirst;

	CCH_DEBUG(("_cch_write EN: nbytes=%d fulp=%d ubc=%d\n",nbytes,fulp,
		*ubcp));
	CLRSTAT(locstat);
	cch_info = (struct cch_f *)fio->lyr_info;

	nbits = BYTES2BITS(nbytes) - *ubcp;
	fio->rwflag = WRITIN;

#if defined(__mips) || defined(_LITTLE_ENDIAN)
	/* Although this layer is capable of handling non-zero ubc */
	/* and bitptrs that aren't on a byte boundary, we are not */
	/* supporting this right now on mips systems. */
	if (*ubcp != 0) {
		err = FDC_ERR_UBC;
		goto err1_ret;
	}
	if ((BPBITOFF(datptr) & 07) != 0) {
		err = FDC_ERR_REQ;
		goto err1_ret;
	}
#endif
	if (nbits == 0) {	/* quick return for nbits == 0 */
		SETSTAT(stat, FFCNT, 0);
		return(0);
	}

	/*
	 * Move data from user to buffer
	 */
	llfio = fio->fioptr;
	bs = cch_info->bsize;		/* bit size of each buffer */
	cpos = cch_info->cpos;		/* current file position */
	olpos = cpos;			/* save original position */
	fileaddr = CCHFLOOR(cpos,bs);	/* bit offset within the file of the
					 * start of the current page */
	if (cpos > cch_info->fsize) {
		firsteof = 1;
		/* Is the page with eof in memory? */
		/* If so, zero out the portion beyond eof. */
		eofaddr = CCHFLOOR(cch_info->fsize, bs);
		CCH_FINDBLK(cch_info, eofaddr, cubuf);
		if (cubuf != NULL && (cubuf->flags & CCH_ZEROED) == 0) {
#ifdef CCH_SDS_SUPPORTED
			if (cch_info->optflags & CCHOPT_SDS) {
				/* should never happen */
				ERETURN(stat, FDC_ERR_INTERR, 0);
			}
#endif
			pgoff = cch_info->fsize - eofaddr; /* offset of eof
							    * within the page */
			SET_BPTR(toptr, INC_BPTR(cubuf->buf, pgoff));
			morebits = bs - pgoff;
			if (morebits != 0) {
				CCH_MEMCLEAR(toptr, morebits);
			}
			cubuf->flags |= CCH_ZEROED;
		}
	}
	while (nbits > 0) {
		/*
		 * Find the cache buffer assigned to the current page.  If
		 * no buffer is currently assigned, then _cch_getblk assigns
		 * one.
		 */
		pgoff = cpos - fileaddr;	/* offset within the page */
		numblocks = 1;			/* number of pages to process
						 * in this iteration */
		CCH_FINDBLK(cch_info, fileaddr, cubuf);
		if (cubuf == NULL) {		/* if data not buffer-resident */
			if (nbits > cch_info->bypasssize
#ifdef CCH_SDS_SUPPORTED
			    && !(cch_info->optflags & CCHOPT_SDS)
#endif
			    ) {
				/* Maybe we can bypass buffering */
				if ((morebits= _cch_bypass(cch_info, nbits,
					cpos, datptr, fileaddr, 'w', llfio,
					&locstat))>0)
					goto adjust;
				else if (morebits < 0) {
					/* Is it right to return the count */
					/* in locstat?  Because we might */
					/* have read some data... */
					goto er1;
				}
				/* we weren't able to bypass buffering */
			}
			morebits = nbits;
			endpos = cpos + morebits;	/* 1 bit past the end */
			endoff = endpos - CCHFLOOR(endpos,bs);
			if (endpos > fileaddr + bs) {
				numblocks = (endpos-fileaddr-1)/bs + 1;
				nbu = cch_info->nbufs;
				/*
				 * Handle at most a cache full at a time
				 */
				if (numblocks > nbu) {
					numblocks = nbu;
					endpos = fileaddr + nbu * bs;
					endoff = 0;
					morebits = endpos - cpos;
				}
			}
			/*
			 * It is possible that the first or last
			 * page must be read because the transfer
			 * fills only part of these pages.  In each
			 * iteration, _cch_getblk requires that
			 * consecutive buffer pages must all be read,
			 * or else all be assigned without pre-reading.
			 * The following code breaks off the current
			 * portion of the transfer when necessary to
			 * accomplish this.
			 */
			if (numblocks > 1) {
				if (numblocks == 2) {
					if ((pgoff == 0) != (endoff == 0)) {
						/* process only first page */
						numblocks = 1;
						endoff	  = 0;
						morebits  = bs - pgoff;
					}
				}
				else {
					if (pgoff) {
						/* process only first page */
						numblocks = 1;
						endoff	  = 0;
						morebits  = bs - pgoff;
					}
					else if (endoff) {
						/* process all but last page */
						numblocks -= 1;
						morebits  -= endoff;
						endoff	   = 0;
					}
				}
			}
			/*
			 * Request that _cch_getblk read in the file
			 * pages if partial pages of data will be
			 * written.
			 */
			gb_rd = (pgoff || endoff);
			/* The pages will be valid if we do not */
			/* have to read them.  That's because */
			/* we will be writing to the entire page. */
			/* The page will also be valid if we do read it */
			valid = 1;
			setfirst = 0;
			if (gb_rd &&
#ifdef CCH_SDS_SUPPORTED
			    !(cch_info->optflags & CCHOPT_SDS) &&
#endif
			    (numblocks == 1) &&
			    ((fileaddr+bs) < cch_info->feof) &&
			    (_CCH_ALIGN(pgoff) && _CCH_ALIGN(endoff))) {
				/* do we really need to read the page in? */
				/* if pgoff and endoff are properly aligned, */
				/* we do not */
				/* Note that if any part of the page is */
				/* beyond feof, we want to read it in. */
				/* That's because code in _cch_rdabuf */
				/* that handles having a partially dirty */
				/* page expects to be able to read the */
				/* data preceding the dirty data */
				gb_rd = 0;
				valid = 0;	/* the page will not be valid */
				setfirst = 1;
			}
			cubuf = _cch_getblk(cch_info, llfio, fileaddr,
				&numblocks, gb_rd, valid, &locstat);
			if (cubuf == NULL) {
				goto er1;
			}
			if (setfirst) {
				cubuf->firstdata = pgoff;
				if (endoff == 0)
					cubuf->lastdata = bs;
				else
					cubuf->lastdata = endoff;
			}
			if (firsteof && pgoff != 0) {
				/* There is a gap between the eof and */
				/* this data.  Zero it if necessary.
				 */
				if ((cubuf->flags & CCH_ZEROED) == 0) {
					int zbits;
#ifdef CCH_SDS_SUPPORTED
					if (cch_info->optflags & CCHOPT_SDS) {
						/* should never happen */
						ERETURN(stat, FDC_ERR_INTERR, 0);
					}
#endif
					if (eofaddr == fileaddr) {
						/* the eof is on this page */
						zbits = bs -
						    (cch_info->fsize - eofaddr);
						SET_BPTR(toptr,
						    INC_BPTR(cubuf->buf,
						    (cch_info->fsize - eofaddr)));
					}
					else {
						/* the eof is not on this page */
						/* zero the entire page */
						zbits = bs;
						toptr = cubuf->buf;
					}
					CCH_MEMCLEAR(toptr, zbits);
					cubuf->flags |= CCH_ZEROED;
				}
			}
			morebits = MIN(nbits, bs * numblocks - pgoff);
			/* remember the last buffer page for next time */
			cch_info->cubuf = cubuf + numblocks - 1;
		}
		else {
			morebits = MIN(nbits, bs - pgoff);
			if (!(cubuf->flags & CCH_VALIDBUFFER)) {
				/* The buffer is there, but it */
				/* is not entirely valid, because */
				/* we never read into it. */
				/* We can continue to just dirty it, */
				/* provided that the dirty part is */
				/* contiguous, and is properly aligned */
				endoff = pgoff + morebits;
				if ((pgoff == cubuf->lastdata &&
				     _CCH_ALIGN(endoff)) ||
				    (endoff == cubuf->firstdata &&
				     _CCH_ALIGN(pgoff)) ||
				    (pgoff >= cubuf->firstdata &&
				     endoff <= cubuf->lastdata)) {
					cubuf->firstdata = MIN(pgoff,
						cubuf->firstdata);
					cubuf->lastdata = MAX(endoff,
						cubuf->lastdata);
					if (cubuf->firstdata == 0 &&
					    cubuf->lastdata == bs) {
						cubuf->lastdata = 0;
						cubuf->flags |= CCH_VALIDBUFFER;
					}
				}
				else {
					/* We can't just keep on putting */
					/* stuff in the buffer without */
					/* prereading it.  So, we will call */
					/* _cch_rdabuf, which has the */
					/* smarts to read only the non-dirty */
					/* parts */
					if (_cch_rdabuf(cch_info, llfio, cubuf,
						BITS2BYTES(cch_info->bsize),
						BITS2BYTES(cubuf->filead),
						1, 's', &locstat)) {
						goto er1;
					}
				}
			}
		}
		for (i=0; i<numblocks; i++) {
			/* adjust last access time */
			CCH_CHRONOMETER(cubuf[i],cch_info);
			cubuf[i].flags |= CCH_DIRTY;
		}
		SET_BPTR(toptr, INC_BPTR(cubuf->buf, pgoff));
#ifdef CCH_SDS_SUPPORTED
		if (cch_info->optflags & CCHOPT_SDS) {
			if (_sds_fr_mem(toptr, datptr, morebits) == ERR)
				ERETURN(stat, errno, 0);
		}
		else
			_CCH_MOV_BITS(toptr, datptr, morebits); /* contiguous bufs */
#else
		_CCH_MOV_BITS(toptr, datptr, morebits);	/* contiguous bufs */
#endif
adjust:
		SET_BPTR(datptr, INC_BPTR(datptr, morebits));
		cpos += morebits;
		nbits -= morebits;
		fileaddr = CCHFLOOR(cpos,bs);	/* bit offset within the file
						 * of the page */
		firsteof = 0;
		if (cpos > cch_info->fsize) {
			cch_info->fsize = cpos;
		}
	}
	cch_info->cpos = cpos;
	moved = cpos - olpos;
	fio->recbits += moved;
	bytes_moved = BITS2BYTES(moved);
	SETSTAT(stat, FFCNT, bytes_moved);
	return(bytes_moved);

err1_ret:
	ERETURN(stat, err, 0);
er1:
	*stat = locstat;
	return(ERR);
}
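/*
 * Sketch of the page arithmetic used throughout _cch_write() and
 * _cch_getblk(): CCHFLOOR rounds a bit position down to the start of its
 * cache page, and pgoff is the offset of the position within that page.
 * "nbits" and the helper name are assumptions of this example.
 */
#if 0	/* illustrative only; excluded from compilation */
static void
cch_page_arith_sketch(struct cch_f *cch_info, int64 nbits)
{
	int	bs	 = cch_info->bsize;		/* page size in bits */
	off_t	cpos	 = cch_info->cpos;		/* current bit position */
	off_t	fileaddr = CCHFLOOR(cpos, bs);		/* page start */
	int	pgoff	 = cpos - fileaddr;		/* offset within the page */
	int64	morebits = MIN(nbits, bs - pgoff);	/* fits in this page */

	(void)morebits;
}
#endif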