/*
 * lock listio requests
 * The lock layer will be called only if the lower layer can handle
 * listio requests.
 */
int
_lock_listio(
	int		cmd,	/* LC_WAIT or LC_START */
	struct fflistreq *list,	/* list of requests (see fflistio) */
	int		nreq,	/* number of requests */
	struct ffsw	*stat)	/* status structure */
{
	int	i;
	struct fdinfo *llfio;
	struct fdinfo *first_fio;
	struct fdinfo *fio;
	struct fflistreq *copy;	/* copy of the list of requests */
	int	numdone;
	int	curent;
	int	pos;
	bitptr	buf;
	int	nb;
	int	zero;
	int	ret;
	int	nstr;
	struct ffsw locstat;
	struct ffsw *istat;

	if (nreq == 0)
		return(0);

	first_fio = GETIOB(list[0].li_fildes);
	for (i = 0; i < nreq; i++) {
		fio = GETIOB(list[i].li_fildes);
		if (fio != first_fio) {
			_SETERROR(stat, FDC_ERR_LSTIO, 0);
			return(-1);
		}
	}

	llfio = first_fio->fioptr;
	if (llfio->can_listio) {
		copy = malloc(nreq * sizeof(*list));
		if (copy == NULL)
			ERETURN(stat, FDC_ERR_NOMEM, 0);
		for (i = 0; i < nreq; i++) {
			copy[i] = list[i];	/* copy the entry */
			copy[i].li_fildes = (int)llfio;
		}
		LYR_LOCK(fio);
		numdone = XRCALL(llfio, listiortn) cmd, copy, nreq, stat);
		LYR_UNLOCK(fio);
		free(copy);
		return(numdone);
	}
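/*
 * Illustrative sketch (not part of the library): the pattern above --
 * verify that every entry in the list routes to the same descriptor,
 * then rewrite each request's file designator before handing the copy
 * to the lower layer -- shown with hypothetical stand-in types in
 * place of struct fflistreq and struct fdinfo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request { void *fildes; size_t nbyte; };	/* stand-in for fflistreq */

/* Reject the whole list unless all entries name 'expected'; otherwise
 * copy the list and retarget each entry at 'lower', as _lock_listio does. */
static int
forward_list(struct request *list, int nreq, void *expected, void *lower)
{
	struct request *copy;
	int i;

	if (nreq == 0)
		return 0;
	for (i = 0; i < nreq; i++)
		if (list[i].fildes != expected)
			return -1;	/* mixed descriptors: reject all */
	copy = malloc(nreq * sizeof(*copy));
	if (copy == NULL)
		return -1;
	memcpy(copy, list, nreq * sizeof(*copy));
	for (i = 0; i < nreq; i++)
		copy[i].fildes = lower;	/* retarget at the lower layer */
	/* ... a real layer would issue 'copy' to the lower layer here ... */
	free(copy);
	return nreq;
}

int
main(void)
{
	int upper, lower;
	struct request r[2] = { { &upper, 100 }, { &upper, 200 } };

	printf("%d\n", forward_list(r, 2, &upper, &lower));	/* prints 2 */
	return 0;
}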
int
_ff_err(struct fdinfo *fio, bitptr bufptr, size_t nbytes,
	struct ffsw *stat, int fulp, int *ubc)
{
	int na;

#ifdef	_CRAY
	NUMARG(na);
#else
	na = 6;
#endif
	if (na == 6 || na == 5)	/* if read/write[ca] */
		_SETERROR(stat, FDC_ERR_NOSUP, 0)
	else
		abort();
	return(ERR);
}
/*
 * _ff_err2 is used when the stat parameter is param #2
 */
int
_ff_err2(
	struct fdinfo	*fio,
	struct ffsw	*stat)
{
	int na;

#ifdef	_UNICOS
	NUMARG(na);
#else
	na = 2;
#endif
	if (na == 2)
		_SETERROR(stat, FDC_ERR_NOSUP, 0)
	else
		abort();
	return(ERR);
}
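/*
 * Illustrative sketch (not part of the library): _ff_err and _ff_err2
 * exist so every slot in a layer's dispatch table can be filled even
 * when an operation is unsupported.  The ops table and error code
 * below are hypothetical simplifications of the fdinfo/XRCALL machinery.
 */
#include <stdio.h>

#define ERR_NOSUP (-1)	/* hypothetical stand-in for FDC_ERR_NOSUP */

struct ops {
	int (*read)(void *buf, int n);
	int (*weof)(void);
};

/* Stub installed wherever a layer has no real weof, mirroring the
 * role _ff_err/_ff_err2 play for unsupported entry points. */
static int
no_weof(void)
{
	return ERR_NOSUP;
}

static int
real_read(void *buf, int n)
{
	(void)buf;
	return n;	/* pretend we read n bytes */
}

int
main(void)
{
	struct ops layer = { real_read, no_weof };	/* weof unsupported */
	char buf[8];

	printf("read: %d\n", layer.read(buf, 8));	/* 8 */
	printf("weof: %d\n", layer.weof());		/* -1 */
	return 0;
}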
/*
 * trace listio requests
 * The trace layer indicates that it can handle listio requests iff
 * the lower layer can.
 */
int
_trc_listio(
	int		cmd,	/* LC_WAIT or LC_START */
	struct fflistreq *list,	/* list of requests (see fflistio) */
	int		nreq,	/* number of requests */
	struct ffsw	*stat)	/* status structure */
{
	int	ret;
	int	i;
	struct fdinfo *llfio;
	struct fdinfo *first_llfio;
	struct fdinfo *fio;
	struct fflistreq *copy;	/* copy of the list of requests */

	if (nreq == 0)
		return(0);

	copy = malloc(nreq * sizeof(*list));
	if (copy == NULL)
		ERETURN(stat, FDC_ERR_NOMEM, 0);

	for (i = 0; i < nreq; i++) {
		fio = GETIOB(list[i].li_fildes);
		llfio = fio->fioptr;
		if (i == 0)
			first_llfio = llfio;
		else if (llfio != first_llfio) {
			_SETERROR(list[i].li_status, FDC_ERR_LSTIO, 0);
			continue;
		}
		copy[i] = list[i];		/* copy the entry */
		copy[i].li_fildes = (int)llfio;	/* pass it on to lower layer */
		_trace_listio(fio, i, cmd, &copy[i], nreq);
	}
	ret = XRCALL(llfio, listiortn) cmd, copy, nreq, stat);
	free(copy);
	return(ret);
}
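/*
 * Illustrative sketch (not part of the library): unlike _lock_listio,
 * which fails the whole list on a mixed descriptor, _trc_listio flags
 * only the offending entry's li_status and keeps going.  Simplified
 * types below; sw_error/sw_flag mimic the ffsw fields.
 */
#include <stdio.h>

struct status { int sw_error; int sw_flag; };
struct req { int target; struct status *st; };

/* Flag bad entries individually and continue, as the trace layer does. */
static int
process(struct req *list, int n, int expected, int badcode)
{
	int i, done = 0;

	for (i = 0; i < n; i++) {
		if (list[i].target != expected) {
			list[i].st->sw_error = badcode;	/* per-request error */
			list[i].st->sw_flag = 1;
			continue;
		}
		done++;	/* a real layer would forward the request here */
	}
	return done;
}

int
main(void)
{
	struct status s0 = {0}, s1 = {0};
	struct req r[2] = { { 3, &s0 }, { 7, &s1 } };

	printf("handled %d, bad entry error %d\n",
	    process(r, 2, 3, -2), s1.sw_error);	/* handled 1, error -2 */
	return 0;
}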
int
_cdc_open(
	const char	*name,
	int		flags,
	int		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	int nextfio = 0;
	int ll_blocked;
	char *ptr;
	union spec_u *nspec;
	int recsize, blksize;
	struct ffsw *dumstat;
	struct cdc_f *cdc_info;

	recsize = 0;	/* this is ignored */
	/*
	 * Block size is 512 60-bit words, or 5120 6-bit characters.
	 * Other block sizes are not allowed.
	 */
	blksize = 5120 * 6;
	/*
	 * Internally, both blksize and recsize are in bits!
	 */
	switch(spec->fld.recfmt) {
	case TR_CDC_CZ:
		fio->maxrecsize = recsize;
		break;
	case TR_CDC_CS:
	case TR_CDC_IW:
	case TR_CDC_CW:
		fio->maxrecsize = -1;
		break;
	}
	fio->maxblksize = blksize;
	/*
	 * Allocate buffer:
	 * block size plus possible 48-bit block terminator plus one
	 * 60-bit word, rounded up, plus 16 slop bytes.
	 */
	fio->_ffbufsiz = blksize + 48 + 64 + 64 + 7;	/* bufsiz in bits + fudge */
	ptr = malloc((fio->_ffbufsiz >> 3) + 16);
	if (ptr == NULL)
		goto nomem;
	/*
	 * Allocate private storage area
	 */
	cdc_info = (struct cdc_f *)calloc(1, sizeof(struct cdc_f));
	if (cdc_info == NULL)
		goto nomem;
	fio->lyr_info = (char *)cdc_info;

	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * Now, open the lower layers...
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks,
			NULL, oinf);
	if (nextfio < 0)
		goto badret;
	fio->fioptr = (struct fdinfo *)nextfio;

	XRCALL(fio->fioptr, fcntlrtn) fio->fioptr, FC_GETINFO,
		&cdc_info->ffci, &dumstat);
	ll_blocked = cdc_info->ffci.ffc_flags & FFC_REC;

	switch(fio->subtype) {
	case TR_CDC_BT_DISK:
		break;	/* either record or stream is OK */
	case TR_CDC_BT_SI:
	case TR_CDC_BT_I:
		if (ll_blocked == 0) {	/* if not blocked */
			_SETERROR(stat, FDC_ERR_NOBDRY, 0);
			goto badret;
		}
		break;
	}
	DUMP_IOB(fio);	/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (nextfio > 0)
		XRCALL(fio->fioptr, closertn) fio->fioptr, &dumstat);
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(ERR);
}
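/*
 * Illustrative sketch (not part of the library): the sizing above keeps
 * everything in bits and converts to bytes only at allocation time; the
 * trailing + 7 makes the >> 3 round up, and the extra 16 bytes are slop.
 * The arithmetic, spelled out:
 */
#include <stdio.h>

int
main(void)
{
	long blksize = 5120 * 6;	/* 512 60-bit words = 30720 bits */
	/* block + 48-bit terminator + one 60-bit word (in 64) + pad */
	long bufbits = blksize + 48 + 64 + 64;
	long bytes = ((bufbits + 7) >> 3) + 16;	/* round up, add slop */

	printf("buffer bits:  %ld\n", bufbits);	/* 30896 */
	printf("malloc bytes: %ld\n", bytes);	/* 3878 */
	return 0;
}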
struct cch_buf *
_cch_getblk(
	struct cch_f	*cch_info,	/* cch_f structure for the file */
	struct fdinfo	*llfio,		/* ffio file descriptor for underlying layer */
	off_t		fileaddr,	/* bit offset within the file of the buffer.
					 * This number must be a multiple of the
					 * buffer size. */
	int64		*nblkp,		/* on input, the number of contiguous buffer
					 * blocks sought.  On output, assigned the
					 * actual number of contiguous buffer blocks
					 * assigned. */
	int		rd,		/* 0 if all of the new blocks may be
					 * assigned without reading the file page.
					 * != 0 if the pages must be read. */
	int		valid,		/* 0 if the CCH_VALIDBUFFER bit should
					 * not be set in the new blocks */
	struct ffsw	*stat)		/* pointer to status return word */
{
	int	i, nbu, ret;
	int	bs;
	int	lru_id;		/* buffer number of least recently used buffer */
	int	limit;
	int64	nblk;
	off_t	endaddr, firstpaddr, faddr;
	long	*wptr;
	long	lru_tm;
	struct cch_buf *cubuf;
	struct cch_buf *cbufs;
	struct cch_buf *fb;

	CCH_DEBUG(("_cch_getblk EN: to bit offset %d\n", fileaddr));
	nbu = cch_info->nbufs;
	cbufs = cch_info->bufs;
	bs = cch_info->bsize;
	nblk = *nblkp;
	if (nblk > 1) {
		/*
		 * Find the first page in the consecutive list of pages
		 * which is buffer-resident.
		 */
		endaddr = fileaddr + nblk * bs;
		firstpaddr = endaddr;
		for (i = 0; i < nbu; i++) {
			off_t	x;
			cubuf = &cbufs[i];
			x = cubuf->filead;
			if (fileaddr <= x && x < firstpaddr)
				firstpaddr = x;
		}
		if (firstpaddr < endaddr)	/* a page is buffer resident */
			nblk = *nblkp = (firstpaddr - fileaddr) / bs;
		if (nblk <= 0)
			return((struct cch_buf *)NULL);	/* shouldn't happen! */
	}
	/*
	 * Find the least-recently accessed sequence of *nblkp contiguous
	 * buffers.  Free buffers are counted as if their last access time
	 * was 0.  Search the buffers in groups of size nblk to speed this
	 * search and reduce fragmentation of the cache.  When nblk > 1,
	 * this algorithm approximates LRU and, most importantly, is
	 * deterministic.
	 */
	lru_tm = MAXLONG;	/* min _rtc() value in upcoming loop */
	lru_id = 0;
	for (i = 0; i < (nbu - nblk + 1); i += nblk) {
		long	last_access = 0;	/* free pages have last_access == 0 */
		if (cbufs[i].filead >= 0)
			last_access = cbufs[i].atime;
		if (last_access < lru_tm) {
			lru_tm = last_access;
			lru_id = i;
		}
	}
	/*
	 * Use the least recently used page buffer or group of page buffers.
	 * Flush any of these page buffers which have the dirty bit set.
	 * When several adjacent buffers are dirty and correspond to adjacent
	 * pages in the file, they can be flushed with one request.
	 */
	fb = &cbufs[lru_id];
	for (i = 0; i < nblk; i++) {
		int	contig = 0;	/* number of contiguous dirty buffers */
		faddr = fb[i].filead;
		if (faddr == -1)
			continue;	/* buffer is free */
		while (i + contig < nblk &&
		       (fb[i+contig].flags & CCH_DIRTY) &&
		       fb[i+contig].filead == faddr) {
			if (fb[i+contig].lastdata || fb[i+contig].firstdata) {
				if (contig == 0)
					contig = 1;
				break;
			}
			contig++;
			faddr += bs;
		}
		if (contig > 0) {
			if (faddr > cch_info->fsize) {
				/*
				 * eof is in the last buffer;
				 * clear it if necessary
				 */
				if ((fb[i+contig-1].flags & CCH_ZEROED) == 0) {
					bitptr	toptr;
					off_t	eofaddr;
					int	pgoff;

					eofaddr = CCHFLOOR(cch_info->fsize, bs);
					pgoff = cch_info->fsize - eofaddr;
					SET_BPTR(toptr,
						INC_BPTR(fb[i+contig-1].buf, pgoff));
					CCH_MEMCLEAR(toptr, (bs - pgoff));
					fb[i+contig-1].flags |= CCH_ZEROED;
				}
			}
			ret = _cch_wrabuf(cch_info, llfio, &fb[i],
				BITS2BYTES(bs),
				BITS2BYTES(fb[i].filead),
				contig, &cch_info->feof,
#if defined(__mips) || defined(_LITTLE_ENDIAN)
				's',	/* flush synchronously */
#else
				'a',	/* flush asynchronously */
#endif
				stat);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
			i += contig - 1;
		}
	}
	/*
	 * Wait for any active page buffer I/O, and then requisition the
	 * buffers for the appropriate file pages.
	 */
	for (i = 0; i < nblk; i++) {
		if (fb[i].flags & (CCH_WRITING | CCH_READING)) {
			CCHWAITIO(llfio, &fb[i], stat, ret);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
		}
		fb[i].filead = fileaddr + i * bs;
		fb[i].flags = CCH_VALID;
		fb[i].firstdata = fb[i].lastdata = 0;
		if (valid)
			fb[i].flags |= CCH_VALIDBUFFER;
	}
	/*
	 * Now start the synchronous reading of the file page into the
	 * buffer.  If all of the pages lie beyond the EOF, then suppress
	 * the read.
	 */
	if (rd) {
		if (fileaddr < cch_info->feof) {
			int	by_tran;

			fb->sw.sw_flag = 0;	/* indicate I/O in progress */
			ret = _cch_rdabuf(cch_info, llfio, fb,
				BITS2BYTES(bs), BITS2BYTES(fb->filead),
				nblk, 's', stat);
			if (ret == ERR)
				return((struct cch_buf *)NULL);
			/*
			 * Zero portions of the buffers past the end of file.
			 */
			by_tran = fb->sw.sw_count;
#ifdef CCH_SDS_SUPPORTED
			if (cch_info->optflags & CCHOPT_SDS) {
				int	ret;
				ret = _sdsset(
					(BPTR2CP(fb->buf) - (char *)NULL) + by_tran,
					0, nblk * BITS2BYTES(bs) - by_tran);
				if (ret == ERR) {
					_SETERROR(stat, errno, 0);
					return((struct cch_buf *)NULL);
				}
			}
			else
#endif
			{
				if ((nblk * BITS2BYTES(bs) - by_tran) != 0)
					(void)memset(BPTR2CP(fb->buf) + by_tran,
						0, nblk * BITS2BYTES(bs) - by_tran);
			}
			for (i = 0; i < nblk; i++)
				fb[i].flags |= CCH_ZEROED;
		}
		else {	/* page lies beyond EOF */
			/*
			 * Zero the entire buffer.
			 */
#ifdef CCH_SDS_SUPPORTED
			if (cch_info->optflags & CCHOPT_SDS) {
				int	ret;
				ret = _sdsset(
					(BPTR2CP(fb->buf) - (char *)NULL),
					0, nblk * BITS2BYTES(bs));
				if (ret == ERR) {
					_SETERROR(stat, errno, 0);
					return((struct cch_buf *)NULL);
				}
				for (i = 0; i < nblk; i++)
					fb[i].flags |= CCH_ZEROED;
			}
			else
#endif
			if (fileaddr < cch_info->fsize) {
				/*
				 * This block is between cch_info->feof and
				 * cch_info->fsize, so we must zero it.
				 * Logic in other parts of this layer will
				 * only zero what is beyond cch_info->fsize.
				 */
#ifdef _CRAY1
				wptr = BPTR2WP(fb->buf);
				limit = (nblk * bs) >> 6;	/* convert to words */
				/* this loop vectorizes! */
				for (i = 0; i < limit; i++)
					wptr[i] = 0;
#else
				memset(BPTR2CP(fb->buf), 0,
					(nblk * BITS2BYTES(bs)));
#endif
				for (i = 0; i < nblk; i++)
					fb[i].flags |= CCH_ZEROED;
			}
		}
	}
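/*
 * Illustrative sketch (not part of the library): the victim-selection
 * loop in _cch_getblk scans buffer slots in strides of nblk and treats
 * free slots (filead < 0) as having access time 0, so free groups win
 * immediately.  Simplified buffer record in place of struct cch_buf.
 */
#include <stdio.h>
#include <limits.h>

struct buf { long filead; long atime; };	/* simplified cch_buf */

/* Return the index of the least-recently-used group of nblk slots. */
static int
lru_group(const struct buf *b, int nbu, int nblk)
{
	long lru_tm = LONG_MAX;
	int lru_id = 0, i;

	for (i = 0; i < nbu - nblk + 1; i += nblk) {
		long last = 0;	/* free slots count as time 0 */
		if (b[i].filead >= 0)
			last = b[i].atime;
		if (last < lru_tm) {
			lru_tm = last;
			lru_id = i;
		}
	}
	return lru_id;
}

int
main(void)
{
	struct buf b[6] = {
		{ 0, 50 },	{ 4096, 60 },	/* group 0: atime 50 */
		{ 8192, 10 },	{ 12288, 90 },	/* group 2: atime 10 */
		{ -1, 0 },	{ -1, 0 },	/* group 4: free, wins */
	};

	printf("victim group: %d\n", lru_group(b, 6, 2));	/* 4 */
	return 0;
}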
/*
 * Write an EOF.
 * If we can write a tape mark, do so.
 * Otherwise, return an error.
 */
int
_tmf_weof(struct fdinfo *fio, struct ffsw *stat)
{
	register int	ret;
	struct tmfio	*xfinfo;
	tmfwfm_t	ctl;

	xfinfo = (struct tmfio *)fio->lyr_info;
	if (xfinfo->tmf_rwtpmk == 0)
		ERETURN(stat, FDC_ERR_NWEOF, 0);

	if (xfinfo->rwflag == READIN) {
		/*
		 * Issue an error if we are not positioned at a record
		 * boundary.  ffweof would terminate the current record,
		 * but _cos_write overwrites the current record.  We need
		 * to decide which is the proper approach before
		 * permitting this here.
		 */
		if (xfinfo->tmf_base != xfinfo->tmf_bufptr) {
			_SETERROR(stat, FDC_ERR_NOTREC, 0);
			return(ERR);
		}
		ret = _tmf_wrard(fio, stat);
		if (ret < 0)
			return(ERR);
	}
	if (_tmf_flush(fio, stat) == ERR)
		return(ERR);
	xfinfo->rwflag = WRITIN;

	(void) memset(&ctl, 0, sizeof(ctl));
	ctl.rh.request = TR_WFM;
	ctl.rh.length = sizeof(tmfwfm_t) - sizeof(tmfreqhdr_t);
	ctl.count = 1;
	ret = ioctl(xfinfo->tmf_fd, TMFC_DMNREQ, &ctl);
	if (ret < 0) {
		if (xfinfo->tmf_eovon && !xfinfo->tmf_speov) {
			/*
			 * The user has enabled eov processing.
			 * Determine whether we hit EOV.
			 */
			if (errno == ENOSPC) {
				/*
				 * This is eov.  We need to save away the
				 * unwritten tape mark and set a flag so we
				 * can tell the user eov was reached.  This
				 * user's write will return a good status.
				 */
				xfinfo->tmf_eovhit = 1;
				xfinfo->tmf_tpmk = 1;
				xfinfo->tmf_bufptr = xfinfo->tmf_base;
				xfinfo->tmf_cnt = 0;
				return(0);
			}
		}
		ERETURN(stat, errno, 0);
	}
	SETSTAT(stat, FFEOF, 0);
	return(0);
}
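/*
 * Illustrative sketch (not part of the library): when the tape-mark
 * ioctl fails with ENOSPC and EOV processing is on, _tmf_weof records
 * the unwritten mark and reports success so end-of-volume handling can
 * run later.  The device state and mark writer below are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

struct dev {
	int eov_enabled;	/* user enabled end-of-volume processing */
	int eov_hit;		/* EOV was reached */
	int pending_tpmk;	/* a tape mark still needs to be written */
};

/* Hypothetical mark writer that hits end of volume. */
static int
write_mark(struct dev *d)
{
	(void)d;
	errno = ENOSPC;
	return -1;
}

/* Mirror of _tmf_weof's error path: defer the mark on ENOSPC. */
static int
weof(struct dev *d)
{
	if (write_mark(d) < 0) {
		if (d->eov_enabled && errno == ENOSPC) {
			d->eov_hit = 1;
			d->pending_tpmk = 1;
			return 0;	/* caller sees a good status */
		}
		return -1;
	}
	return 0;
}

int
main(void)
{
	struct dev d = { 1, 0, 0 };

	printf("weof %d, pending mark %d\n", weof(&d), d.pending_tpmk);
	return 0;
}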
/*
 * _cca_listio
 *
 * Issue a listio request for the cachea layer.
 *
 * Return Value:
 *
 *	On success, nreq is returned, and the contents of the stat
 *	structure are unspecified.
 *
 *	If an error in setup is encountered, stat is set as follows:
 *
 *		stat->sw_error	= error code
 *		stat->sw_stat	= FFERR
 *		stat->sw_flag	= 1
 *		stat->sw_count	= 0
 *
 *	If an error in I/O request I is detected, the list[I].li_stat
 *	structure will be set as follows:
 *
 *		list[I].li_stat->sw_error = error code
 *		list[I].li_stat->sw_flag  = 1
 */
int
_cca_listio(
	int		cmd,	/* LC_START or LC_WAIT */
	struct fflistreq *list,	/* list of requests (see fflistio) */
	int		nreq,	/* number of requests */
	struct ffsw	*stat)	/* status structure */
{
	int	ret;
	int	i;
	int	n_handled;
	int	status;
	int	zero;
	int	pos;
	bitptr	buf;
	struct ffsw loc_stat;
	struct fdinfo *fio;
	struct fdinfo *oldfio;
	struct cca_f *cca_info;

	n_handled = 0;
	oldfio = GETIOB(list[0].li_fildes);
	cca_info = (struct cca_f *)oldfio->lyr_info;

	for (i = 0; i < nreq; i++) {
		fio = GETIOB(list[i].li_fildes);
		if (fio != oldfio) {
			_SETERROR(list[i].li_status, FDC_ERR_LSTIO, 0);
			continue;
		}
		if (list[i].li_signo != 0) {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
			continue;
		}
		cca_info = (struct cca_f *)fio->lyr_info;
		CLRFFSTAT(*(list[i].li_status));
		SET_BPTR(buf, CPTR2BP(list[i].li_buf));

		if (list[i].li_nstride > 1) {
			status = _ffcompound(&list[i]);
			if (status == 0)
				n_handled++;
			continue;
		}
		if (list[i].li_flags == LF_LSEEK) {
			pos = _cca_seek(fio, list[i].li_offset, SEEK_SET,
				&loc_stat);
			if (pos == -1) {
				*list[i].li_status = loc_stat;
				continue;
			}
		}
		else if (list[i].li_flags != 0) {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}

		zero = 0;
		status = 0;
		if (cmd == LC_START) {
			if (list[i].li_opcode == LO_READ)
				status = _cca_reada(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero);
			else if (list[i].li_opcode == LO_WRITE)
				status = _cca_writea(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero);
			else
				_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}
		else if (cmd == LC_WAIT) {
			if (list[i].li_opcode == LO_READ)
				status = _cca_read(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero);
			else if (list[i].li_opcode == LO_WRITE)
				status = _cca_write(fio, buf, list[i].li_nbyte,
					list[i].li_status, FULL, &zero);
			else
				_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}
		else {
			_SETERROR(list[i].li_status, FDC_ERR_REQ, 0);
		}
		if (status == ERR)
			continue;
		n_handled++;
	}
	return(n_handled);
}
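/*
 * Illustrative sketch (not part of the library): the dispatch above
 * maps LC_START to the asynchronous entry points and LC_WAIT to the
 * synchronous ones.  The enum values here are placeholders; the real
 * LC_ and LO_ constants come from the fflistio interface.
 */
#include <stdio.h>

enum { LC_START, LC_WAIT };	/* placeholder values */
enum { LO_READ, LO_WRITE };

/* Name the handler _cca_listio would call for a (cmd, opcode) pair. */
static const char *
dispatch(int cmd, int opcode)
{
	if (cmd == LC_START)
		return opcode == LO_READ ? "_cca_reada" :
		    opcode == LO_WRITE ? "_cca_writea" : "error";
	if (cmd == LC_WAIT)
		return opcode == LO_READ ? "_cca_read" :
		    opcode == LO_WRITE ? "_cca_write" : "error";
	return "error";
}

int
main(void)
{
	printf("%s\n", dispatch(LC_START, LO_READ));	/* _cca_reada */
	printf("%s\n", dispatch(LC_WAIT, LO_WRITE));	/* _cca_write */
	return 0;
}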
_ffopen_t
_gen_fopen(
	const char	*name,
	int		flags,
	mode_t		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	char *ptr;
	union spec_u *nspec;
	long recsize, blksize;	/* bits */
	long rsz, mbs;		/* bytes */
	_ffopen_t nextfio;
	int rtype;
	struct gen_ff *ff_dat;

	rsz = spec->fld.recsize;
	mbs = spec->fld.mbs;
	rtype = spec->fld.recfmt;
	if (rtype < 0 || rtype >= NUM_F_TYPES) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		return(_FFOPEN_ERR);
	}
	/*
	 * General limit checks from table.
	 */
	if (rsz == 0) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}
	if (rsz < _F_limits[rtype].min_rsz ||
	    rsz > _F_limits[rtype].max_rsz) {
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}
	if (mbs != 0)
		if (mbs < _F_limits[rtype].min_mbs ||
		    mbs > _F_limits[rtype].max_mbs) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}

	switch(rtype) {
	case TR_IBM_F:
		/*
		 * If mbs and rsz are both specified with F format
		 * and mbs != rsz, then error.
		 */
		if (mbs != rsz && mbs != 0) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		/* FALLTHROUGH */
	case TR_IBM_FB:
		if (mbs == 0)
			mbs = rsz;	/* dflt mbs = rsz */
		/* must be exact multiple */
		if ((mbs % rsz) != 0) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		break;
	case TR_VMS_F_DSK:
	case TR_VMS_F_TP:
	case TR_VMS_F_TR:
		if (mbs == 0) {	/* unspecified */
			if (rtype != TR_VMS_F_TP)
				mbs = rsz;	/* dflt mbs = rsz */
			else {			/* dflt mbs = 2048 */
				mbs = 2048;
				if (rsz > mbs)
					mbs = rsz;
			}
		}
		if (rsz > mbs) {
			_SETERROR(stat, FDC_ERR_BADSPC, 0);
			goto badret;
		}
		break;
	default:
		_SETERROR(stat, FDC_ERR_BADSPC, 0);
		goto badret;
	}
	/*
	 * Convert 8-bit bytes to bits.
	 * Internally, both blksize and recsize are in bits!
	 */
	recsize = rsz << 3;
	blksize = mbs << 3;
	fio->maxrecsize = recsize;
	fio->maxblksize = blksize;
	fio->_ffbufsiz = blksize;	/* bit size of buffer */
	/*
	 * Allocate buffer
	 */
	ptr = malloc((blksize >> 3) + 16);
	if (ptr == NULL)
		goto nomem;
	/*
	 * Allocate private data area
	 */
	fio->lyr_info = (char *)calloc(1, sizeof(struct gen_ff));
	if (fio->lyr_info == NULL)
		goto nomem;
	/* load up record characteristics */
	ff_dat = (struct gen_ff *)fio->lyr_info;
	*ff_dat = _Frec_def_tab[rtype];

	SET_BPTR(fio->_base, CPTR2BP(ptr));
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * First, open the lower layers
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks,
			NULL, oinf);
	if (nextfio == _FFOPEN_ERR)
		goto badret;
	DUMP_IOB(fio);	/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(_FFOPEN_ERR);
}
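/*
 * Illustrative sketch (not part of the library): the IBM F/FB geometry
 * rules above reduce to "mbs defaults to rsz, and must be an exact
 * multiple of it" (with plain F further requiring mbs == rsz or 0).
 * Sizes are in bytes, as in _gen_fopen before the << 3 conversion.
 */
#include <stdio.h>

/* Validate FB record geometry the way _gen_fopen's switch does. */
static int
fb_ok(long rsz, long mbs)
{
	if (rsz == 0)
		return 0;
	if (mbs == 0)
		mbs = rsz;	/* default: one record per block */
	return (mbs % rsz) == 0;
}

int
main(void)
{
	printf("%d\n", fb_ok(80, 800));	/* 1: ten records per block */
	printf("%d\n", fb_ok(80, 850));	/* 0: 850 not a multiple of 80 */
	return 0;
}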
_ffopen_t
_gen_xopen(
	const char	*name,
	int		flags,
	mode_t		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	char *ptr;
	union spec_u *nspec;
	int blksize;
	_ffopen_t nextfio;
	int isvalid;
	struct gen_xf *xf_info;

	/*
	 * Allocate private storage
	 */
	xf_info = (struct gen_xf *)calloc(1, sizeof(struct gen_xf));
	if (xf_info == NULL)
		goto nomem;
	fio->lyr_info = (char *)xf_info;
	/*
	 * select parameters based on record type
	 */
	switch(fio->rtype) {
	case TR_NVE_V:
		xf_info->rdwlen = 112;	/* bits */
		break;
	case TR_CRAY_V:
		xf_info->rdwlen = 64;	/* bits */
		break;
#ifdef _OLD_F77
	case TR_UX_VAX:
	case TR_UX_SUN:
		xf_info->rdwlen = 32;	/* bits */
		break;
#endif
	case TR_205_W:
		xf_info->rdwlen = 64;	/* bits */
		break;
	}
	xf_info->last_lrdwaddr = 0;
	xf_info->lrdwaddr = 0;
	/*
	 * Record the maximum record size in bits.
	 * A value of 0 is stored if this is unspecified.
	 */
	fio->maxrecsize = _ff_nparm_getv(spec, 1, &isvalid) * 8;
	/*
	 * Record the buffer size in bits.
	 */
	blksize = _ff_nparm_getv(spec, 2, &isvalid) * 8;
	if (!isvalid || blksize < 256)	/* bits, mighty small! */
		blksize = X_BUFSIZ * BITPBLOCK;
	else
		blksize = (blksize + 077) & (~077);	/* round to word size */
	/*
	 * Although the _ffbufsiz field is declared as long, these
	 * routines use GETDATA and PUTDATA.  Those macros assign the
	 * amount to be written to integers.  So, to make this all work
	 * we need to be sure that the buffer size does not exceed the
	 * size of an integer.
	 */
	if (blksize > (1 << (sizeof(int)*8 - 5))) {
		_SETERROR(stat, FDC_ERR_BUFSIZ, 0);
		goto badret;
	}
	fio->_ffbufsiz = blksize;	/* bit size of buffer */
	ptr = malloc((blksize >> 3) + 16);
	if (ptr == NULL)
		goto nomem;
	SET_BPTR(fio->_base, CPTR2BP(ptr));

	fio->scc = SCCFULL;
	fio->lastscc = SCCFULL;
	fio->rwflag = POSITIN;
	fio->segbits = 0;
	fio->_cnt = 0;
	fio->_ptr = fio->_base;
	/*
	 * Now, open the lower layers
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	nextfio = _ffopen(name, flags, mode, nspec, stat, cbits, cblks,
			NULL, oinf);
	if (nextfio < 0)
		goto badret;
	DUMP_IOB(fio);	/* debugging only */
	return(nextfio);

nomem:
	_SETERROR(stat, FDC_ERR_NOMEM, 0);
badret:
	if (BPTR2CP(fio->_base) != NULL)
		free(BPTR2CP(fio->_base));
	if (fio->lyr_info != NULL)
		free(fio->lyr_info);
	return(_FFOPEN_ERR);
}
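/*
 * Illustrative sketch (not part of the library): two details in
 * _gen_xopen are easy to misread.  (blksize + 077) & ~077 rounds up to
 * the next multiple of 64 bits, and 1 << (sizeof(int)*8 - 5) is 1 << 27
 * with 32-bit int, capping the buffer at 2^27 bits so byte counts still
 * fit in an int with room to spare.
 */
#include <stdio.h>

int
main(void)
{
	long bits = 1000;			/* requested size in bits */
	long rounded = (bits + 077) & ~077;	/* next 64-bit multiple */
	long cap = 1L << (sizeof(int) * 8 - 5);	/* 2^27, for 32-bit int */

	printf("rounded: %ld\n", rounded);	/* 1024 */
	printf("cap:     %ld bits\n", cap);	/* 134217728 */
	return 0;
}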
_ffopen_t
_cch_open(
	const char	*name,
	int		oflags,
	mode_t		mode,
	struct fdinfo	*fio,
	union spec_u	*spec,
	struct ffsw	*stat,
	long		cbits,
	int		cblks,
	struct gl_o_inf	*oinf)
{
	int	i;
	_ffopen_t nextfio;
	int	nb;		/* number of buffers */
	int64	bs;		/* size of each buffer in bits */
	int	ret;
	int	isvalid;
	char	*s;
	bitptr	bptr;
	struct fdinfo *nfioptr;
	union spec_u *nspec;
	struct cch_f *cch_info;
	struct cch_buf *cbufs;
	struct stat fstat;
	struct ffsw clstat;
	struct ffc_info_s layer_info;
	int64	bypass;
#ifdef __mips
	struct dioattr dio;
	int	o_direct = 0;
#endif

#if defined(_CRAY1) || defined(_CRAYMPP)
	oflags |= O_RAW;	/* We believe that bypassing system cache
				 * enhances performance in most cases. */
#endif
	/*
	 * Allocate the layer-specific data area.
	 */
	cch_info = (struct cch_f *)calloc(1, sizeof(struct cch_f));
	if (cch_info == NULL)
		goto nomem;
	fio->lyr_info = (char *)cch_info;
	cch_info->optflags = 0;	/* initially, no special options */
	/*
	 * Get values from the FFIO spec.
	 */
#ifdef CCH_SDS_SUPPORTED
	if (spec->fld.recfmt == TR_CCH_SDS) {
		cch_info->optflags |= CCHOPT_SDS;
		oflags |= O_SSD;	/* request I/O betw disk and SDS */
	}
#endif
	bs = _ff_nparm_getv(spec, 1, &isvalid) * BITPBLOCK;
	if (!isvalid) {
#ifdef _UNICOS_MAX
		if (_MPP_MPPSIM > 0) {
			/*
			 * Running on the simulator in user virtual mode.
			 * The simulator cannot handle large reads/writes,
			 * so restrict the size.
			 */
			bs = CCH_DEF_SIMBUFSIZ * BITPBLOCK;
		}
		else
#endif
			bs = CCH_DEF_BUFSIZ * BITPBLOCK;	/* set default bs */
	}
	if (bs <= 0 || bs >= CCH_MAX_BBUFSIZ) {
		_SETERROR(stat, FDC_ERR_BUFSIZ, 0);
		goto badret;
	}
	nb = _ff_nparm_getv(spec, 2, &isvalid);
	if (!isvalid)
		nb = CCH_DEF_NBUF;	/* set default nb */
	if (nb <= 0) {
		_SETERROR(stat, FDC_ERR_NBUF0, 0);
		goto badret;
	}
	cch_info->nbufs = nb;
	cch_info->bufs = NULL;
	/*
	 * Set flag if -m on is assigned.
	 */
#ifdef __mips
	if (oflags & O_DIRECT)
		o_direct = 1;
#endif
	if (oinf->aip != NULL) {
		if (oinf->aip->m_multup_flg && oinf->aip->m_multup) {
			cch_info->is_multup = 1;
			oinf->aip->m_multup_flg |= ATTR_USED;
		}
#ifdef __mips
		if (oinf->aip->B_direct_flg) {
			if (oinf->aip->B_direct)
				o_direct = 1;
			else
				o_direct = 0;
		}
#endif
	}
	/*
	 * Allocate the buffer control blocks contiguously.
	 */
	if ((cch_info->bufs =
	    (struct cch_buf *)calloc(nb, sizeof(struct cch_buf))) == NULL)
		goto nomem;
	/*
	 * Get the FFIO spec for the next lower layer.
	 */
	nspec = spec;
	NEXT_SPEC(nspec);
	/*
	 * Open the layers below this one.
	 */
	nextfio = _ffopen(name, oflags, mode, nspec, stat, cbits, cblks,
			NULL, oinf);
	if (nextfio == _FFOPEN_ERR)
		goto badret;
	nfioptr = (struct fdinfo *)nextfio;
	/*
	 * Get information about the underlying layer.
	 */
	ret = XRCALL(nfioptr, fcntlrtn) nfioptr, FC_STAT, &fstat, stat);
	if (ret == ERR)
		goto close_badret;
	ret = XRCALL(nfioptr, fcntlrtn) nfioptr, FC_GETINFO, &layer_info, stat);
	if (ret == ERR)
		goto close_badret;
	if (layer_info.ffc_flags & FFC_CANSYLISTIO)
		cch_info->do_sylistio = 1;
#ifdef __mips
	/*
	 * Have we been requested to open with O_DIRECT?
	 */
	if (o_direct) {
		int	nflag;
		int64	bsbyt;

		bsbyt = bs / 8;	/* convert buffer size to bytes */
		/* determine buffer size requirements for O_DIRECT */
		ret = XRCALL(nfioptr, fcntlrtn) nfioptr, FC_DIOINFO, &dio, stat);
		if (ret == ERR)
			goto close_badret;
		/*
		 * Adjust the size of the buffers for O_DIRECT's
		 * requirements.
		 */
		if (bsbyt % dio.d_miniosz != 0) {
			/* We need to write in these units. */
			bsbyt = bsbyt - bsbyt % dio.d_miniosz;
		}
		if (bsbyt < dio.d_miniosz) {
			bsbyt = dio.d_miniosz;
		}
		else if (bsbyt > dio.d_maxiosz) {
			bsbyt = dio.d_maxiosz;
		}
		if (bsbyt % dio.d_mem != 0) {
			/*
			 * Each buffer needs to be memaligned.  Since this
			 * layer expects all buffers to be contiguous,
			 * we're out of luck.
			 */
			errno = FDC_ERR_BUFSIZ;
			goto close_badret;
		}
		bs = bsbyt * 8;	/* convert back to bits */
		cch_info->maxiosize = dio.d_maxiosz;
		cch_info->miniosize = dio.d_miniosz;
		cch_info->chunksize = dio.d_miniosz;
		cch_info->diskalign = dio.d_miniosz;
		cch_info->memalign = dio.d_mem;
		cch_info->odirect = 1;
	}
	else {
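/*
 * Illustrative sketch (not part of the library): the O_DIRECT sizing
 * above trims the buffer to a multiple of the minimum I/O size, then
 * clamps it to [d_miniosz, d_maxiosz].  Field names follow struct
 * dioattr; the d_mem alignment check is left out here.
 */
#include <stdio.h>

/* Fit a byte count to direct-I/O constraints as _cch_open does. */
static long
fit_dio(long bytes, long miniosz, long maxiosz)
{
	if (bytes % miniosz != 0)
		bytes -= bytes % miniosz;	/* whole units only */
	if (bytes < miniosz)
		bytes = miniosz;
	else if (bytes > maxiosz)
		bytes = maxiosz;
	return bytes;
}

int
main(void)
{
	printf("%ld\n", fit_dio(100000, 4096, 1L << 20));	/* 98304 */
	printf("%ld\n", fit_dio(1000, 4096, 1L << 20));		/* 4096 */
	return 0;
}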