/*
 * Read and discard 'bytes' bytes from an open Rx call.
 *
 * Data is pulled in chunks of at most rxread_size bytes. When the
 * file-scope flag use_rx_readv is set, the scatter/gather rx_Readv()
 * path is used (with the chunk additionally clamped to
 * RX_MAX_PACKET_DATA_SIZE); otherwise a plain rx_Read() into the
 * scratch buffer 'somebuf' is issued.
 *
 * Returns 0 when all bytes were consumed, 1 on any short read.
 */
static int
do_readbytes(struct rx_call *call, afs_int32 bytes)
{
    struct iovec iov[RX_MAXIOVECS];
    afs_int32 chunk;
    int niov;
    int nread;

    while (bytes > 0) {
	/* Never ask for more than the configured read size or what's left. */
	chunk = (bytes < rxread_size) ? bytes : rxread_size;

	if (use_rx_readv) {
	    if (chunk > RX_MAX_PACKET_DATA_SIZE)
		chunk = RX_MAX_PACKET_DATA_SIZE;
	    nread = rx_Readv(call, iov, &niov, RX_MAXIOVECS, chunk);
	} else {
	    nread = rx_Read(call, somebuf, chunk);
	}

	if (nread != chunk)
	    return 1;	/* short read: report failure to the caller */

	bytes -= chunk;
    }
    return 0;
}
/*
 * No-cache prefetch routine: pull fetched file data straight off an Rx
 * call and copy it into the caller's uio iovecs (pages), bypassing the
 * AFS disk cache.
 *
 * acall         - established Rx call to read the FetchData reply from
 * avc           - vcache for the file; only f.states (CForeign) is read here
 * auio          - destination uio; each iovec's iov_base is treated as a
 *                 bypass_page_t page to fill and (optionally) release
 * release_pages - nonzero to release each page once it has been filled
 * size          - upper bound on any single advertised transfer length;
 *                 also decremented as data arrives
 *
 * Returns 0 on success, EIO (or a negative Rx error) on failure. On every
 * exit path the pages are either released here or via
 * unlock_and_release_pages(), and the temporary iovec array is freed.
 *
 * NOTE(review): the GLOCK is dropped around each Rx read via
 * COND_GUNLOCK/COND_RE_GLOCK — 'locked' is passed to those macros
 * uninitialized, so presumably the macros set it on the way out; confirm
 * against their definitions.
 */
static afs_int32
afs_NoCacheFetchProc(struct rx_call *acall,
		     struct vcache *avc,
		     struct uio *auio,
		     afs_int32 release_pages,
		     afs_int32 size)
{
    afs_int32 length;
    afs_int32 code;
    int moredata, iovno, iovoff, iovmax, result, locked;
    struct iovec *ciov;
    struct iovec *rxiov;
    int nio = 0;
    bypass_page_t pp;
    int curpage, bytes;
    int pageoff;

    /* Scratch iovec array for rx_Readv; freed at 'done'. */
    rxiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
    ciov = auio->uio_iov;
    pp = (bypass_page_t) ciov->iov_base;
    iovmax = auio->uio_iovcnt - 1;
    iovno = iovoff = result = 0;

    do {
	/* Each data block is preceded by its length on the wire
	 * (network byte order). Drop the global lock around the read. */
	COND_GUNLOCK(locked);
	code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
	COND_RE_GLOCK(locked);
	if (code != sizeof(afs_int32)) {
	    result = EIO;
	    afs_warn("Preread error. code: %d instead of %d\n",
		     code, (int)sizeof(afs_int32));
	    unlock_and_release_pages(auio);
	    goto done;
	} else
	    length = ntohl(length);

	/* Sanity: the server must not advertise more than we asked for. */
	if (length > size) {
	    result = EIO;
	    afs_warn("Preread error. Got length %d, which is greater than size %d\n",
		     length, size);
	    unlock_and_release_pages(auio);
	    goto done;
	}

	/* If we get a 0 length reply, time to cleanup and return */
	if (length == 0) {
	    unlock_and_release_pages(auio);
	    result = 0;
	    goto done;
	}

	/*
	 * The fetch protocol is extended for the AFS/DFS translator
	 * to allow multiple blocks of data, each with its own length,
	 * to be returned. As long as the top bit is set, there are more
	 * blocks expected.
	 *
	 * We do not do this for AFS file servers because they sometimes
	 * return large negative numbers as the transfer size.
	 */
	if (avc->f.states & CForeign) {
	    /* Top bit is the "more blocks follow" flag; strip it from the
	     * length before using it as a byte count. */
	    moredata = length & 0x80000000;
	    length &= ~0x80000000;
	} else {
	    moredata = 0;
	}

	/* Walk every destination page, filling each from the Rx iovecs.
	 * iovno/iovoff track our position in the most recent rx_Readv
	 * result; pageoff tracks how much of the current page is filled. */
	for (curpage = 0; curpage <= iovmax; curpage++) {
	    pageoff = 0;
	    /* properly, this should track uio_resid, not a fixed page size! */
	    while (pageoff < auio->uio_iov[curpage].iov_len) {
		/* If no more iovs, issue new read. */
		if (iovno >= nio) {
		    COND_GUNLOCK(locked);
		    bytes = rx_Readv(acall, rxiov, &nio, RX_MAXIOVECS, length);
		    COND_RE_GLOCK(locked);
		    if (bytes < 0) {
			afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n",
				 bytes);
			result = bytes;
			unlock_and_release_pages(auio);
			goto done;
		    } else if (bytes == 0) {
			/* we failed to read the full length */
			result = EIO;
			afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
			unlock_and_release_pages(auio);
			goto done;
		    }
		    size -= bytes;
		    auio->uio_resid -= bytes;
		    iovno = 0;
		}
		pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
		if (pageoff + (rxiov[iovno].iov_len - iovoff)
		    <= auio->uio_iov[curpage].iov_len) {
		    /* Copy entire (or rest of) current iovec into current page */
		    if (pp)
			afs_bypass_copy_page(pp, pageoff, rxiov, iovno,
					     iovoff, auio, curpage, 0);
		    length -= (rxiov[iovno].iov_len - iovoff);
		    pageoff += rxiov[iovno].iov_len - iovoff;
		    iovno++;
		    iovoff = 0;
		} else {
		    /* Copy only what's needed to fill current page */
		    if (pp)
			afs_bypass_copy_page(pp, pageoff, rxiov, iovno,
					     iovoff, auio, curpage, 1);
		    length -= (auio->uio_iov[curpage].iov_len - pageoff);
		    iovoff += auio->uio_iov[curpage].iov_len - pageoff;
		    pageoff = auio->uio_iov[curpage].iov_len;
		}
		/* we filled a page, or this is the last page. conditionally release it */
		if (pp && ((pageoff == auio->uio_iov[curpage].iov_len
			    && release_pages)
			   || (length == 0 && iovno >= nio)))
		    release_full_page(pp, pageoff);
		/* All advertised bytes consumed and no buffered iovecs
		 * remain: this block is done (skips the trailing
		 * while (moredata) continuation for this iteration). */
		if (length == 0 && iovno >= nio)
		    goto done;
	    }
	}
    } while (moredata);

 done:
    osi_FreeSmallSpace(rxiov);
    return result;
}