/*
 * Main
 */
int
main(int argc, char *argv[])
{
    // Parse the command line
    if (!optparse(argc, argv))
        abort();

    /* Nanosleep setup */
    int millisec = 250;     // length of time to sleep, in milliseconds
    struct timespec req = {0};
    req.tv_sec = 0;
    req.tv_nsec = millisec * 1000000L;

    // Initialize the world and its cells
    world_t world;

    /* Allocate memory */
    allocateMemory(&world);

    /* Populate the world */
    if (path) {
        read_world(&world, path);
    } else if (!create_world(&world)) {
        printf("error creating world\n");
        exit(-2);
    } else {
        fillWorld(&world);
    }

    /* Print the world */
    printWorld(&world);

    /*
     * Run the next step
     * TODO: exit the loop once nothing changes anymore
     */
    while (true) {
        if (automode)
            nanosleep(&req, NULL);
        else
            bwait();
        nextStep(&world);
        printWorld(&world);
    }

    return 0;
}
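/*
 * The loop above either sleeps 250 ms per generation (automode) or
 * blocks until the user advances the simulation by hand.  bwait() is
 * defined elsewhere in this project; the following is only a minimal
 * sketch of what such a blocking wait might look like, assuming it
 * simply waits for a newline on stdin.
 */
#include <stdio.h>

static void
bwait_sketch(void)
{
    int c;

    /* Discard input until the user presses Enter (or hits EOF). */
    while ((c = getchar()) != '\n' && c != EOF)
        ;
}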
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
    vm_object_t object;
    struct bufobj *bo;
    struct buf *bp;
    off_t foff;
#ifdef INVARIANTS
    off_t blkno0;
#endif
    int bsize, pagesperblock, *freecnt;
    int error, before, after, rbehind, rahead, poff, i;
    int bytecount, secmask;

    KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
        ("%s does not support devices", __func__));
    if (vp->v_iflag & VI_DOOMED)
        return (VM_PAGER_BAD);

    object = vp->v_object;
    foff = IDX_TO_OFF(m[0]->pindex);
    bsize = vp->v_mount->mnt_stat.f_iosize;
    pagesperblock = bsize / PAGE_SIZE;

    KASSERT(foff < object->un_pager.vnp.vnp_size,
        ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
    KASSERT(count <= nitems(bp->b_pages),
        ("%s: requested %d pages", __func__, count));

    /*
     * The last page has valid blocks.  An invalid part can only
     * exist at the end of the file, and the page is made fully valid
     * by zeroing in vm_pager_get_pages().
     */
    if (m[count - 1]->valid != 0 && --count == 0) {
        if (iodone != NULL)
            iodone(arg, m, 1, 0);
        return (VM_PAGER_OK);
    }

    /*
     * Synchronous and asynchronous paging operations use different
     * free pbuf counters.  This is done to keep asynchronous requests
     * from consuming all pbufs.
     * Allocate the pbuf at the very beginning of the function, so that
     * if we are low on a certain kind of pbuf we don't even proceed to
     * BMAP, but sleep.
     */
    freecnt = iodone != NULL ?
        &vnode_async_pbuf_freecnt : &vnode_pbuf_freecnt;
    bp = getpbuf(freecnt);

    /*
     * Get the underlying device blocks for the file with VOP_BMAP().
     * If the file system doesn't support VOP_BMAP, use the old way of
     * getting pages via VOP_READ.
     */
    error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
    if (error == EOPNOTSUPP) {
        relpbuf(bp, freecnt);
        VM_OBJECT_WLOCK(object);
        for (i = 0; i < count; i++) {
            PCPU_INC(cnt.v_vnodein);
            PCPU_INC(cnt.v_vnodepgsin);
            error = vnode_pager_input_old(object, m[i]);
            if (error)
                break;
        }
        VM_OBJECT_WUNLOCK(object);
        return (error);
    } else if (error != 0) {
        relpbuf(bp, freecnt);
        return (VM_PAGER_ERROR);
    }

    /*
     * If the file system supports BMAP, but the blocksize is smaller
     * than a page size, then use the special small filesystem code.
     */
    if (pagesperblock == 0) {
        relpbuf(bp, freecnt);
        for (i = 0; i < count; i++) {
            PCPU_INC(cnt.v_vnodein);
            PCPU_INC(cnt.v_vnodepgsin);
            error = vnode_pager_input_smlfs(object, m[i]);
            if (error)
                break;
        }
        return (error);
    }

    /*
     * A sparse file can be encountered only for a single page request,
     * which may not be preceded by a call to vm_pager_haspage().
     */
    if (bp->b_blkno == -1) {
        KASSERT(count == 1,
            ("%s: array[%d] request to a sparse file %p", __func__,
            count, vp));
        relpbuf(bp, freecnt);
        pmap_zero_page(m[0]);
        KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
            __func__, m[0]));
        VM_OBJECT_WLOCK(object);
        m[0]->valid = VM_PAGE_BITS_ALL;
        VM_OBJECT_WUNLOCK(object);
        return (VM_PAGER_OK);
    }

#ifdef INVARIANTS
    blkno0 = bp->b_blkno;
#endif
    bp->b_blkno += (foff % bsize) / DEV_BSIZE;

    /* Recalculate blocks available after/before to pages. */
    poff = (foff % bsize) / PAGE_SIZE;
    before *= pagesperblock;
    before += poff;
    after *= pagesperblock;
    after += pagesperblock - (poff + 1);
    if (m[0]->pindex + after >= object->size)
        after = object->size - 1 - m[0]->pindex;
    KASSERT(count <= after + 1,
        ("%s: %d pages asked, can do only %d", __func__, count, after + 1));
    after -= count - 1;

    /* Trim requested rbehind/rahead to possible values. */
    rbehind = a_rbehind ? *a_rbehind : 0;
    rahead = a_rahead ? *a_rahead : 0;
    rbehind = min(rbehind, before);
    rbehind = min(rbehind, m[0]->pindex);
    rahead = min(rahead, after);
    rahead = min(rahead, object->size - m[count - 1]->pindex);

    /*
     * Check that the total number of pages fits into the buf.  Trim
     * rbehind and rahead evenly if not.
     */
    if (rbehind + rahead + count > nitems(bp->b_pages)) {
        int trim, sum;

        trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
        sum = rbehind + rahead;
        if (rbehind == before) {
            /* Round up rbehind trim to block size. */
            rbehind -= roundup(trim * rbehind / sum, pagesperblock);
            if (rbehind < 0)
                rbehind = 0;
        } else
            rbehind -= trim * rbehind / sum;
        rahead -= trim * rahead / sum;
    }
    KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
        ("%s: behind %d ahead %d count %d", __func__,
        rbehind, rahead, count));

    /*
     * Fill in the bp->b_pages[] array with requested and optional
     * read behind or read ahead pages.  Read behind pages are looked
     * up in a backward direction, down to the first cached page.  Same
     * for read ahead pages, but there is no need to shift the array
     * in case of encountering a cached page.
     */
    i = bp->b_npages = 0;
    if (rbehind) {
        vm_pindex_t startpindex, tpindex;
        vm_page_t p;

        VM_OBJECT_WLOCK(object);
        startpindex = m[0]->pindex - rbehind;
        if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
            p->pindex >= startpindex)
            startpindex = p->pindex + 1;

        /* tpindex is unsigned; beware of numeric underflow. */
        for (tpindex = m[0]->pindex - 1;
            tpindex >= startpindex && tpindex < m[0]->pindex;
            tpindex--, i++) {
            p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
            if (p == NULL) {
                /* Shift the array. */
                for (int j = 0; j < i; j++)
                    bp->b_pages[j] = bp->b_pages[j +
                        tpindex + 1 - startpindex];
                break;
            }
            bp->b_pages[tpindex - startpindex] = p;
        }

        bp->b_pgbefore = i;
        bp->b_npages += i;
        bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
    } else
        bp->b_pgbefore = 0;

    /* Requested pages. */
    for (int j = 0; j < count; j++, i++)
        bp->b_pages[i] = m[j];
    bp->b_npages += count;

    if (rahead) {
        vm_pindex_t endpindex, tpindex;
        vm_page_t p;

        if (!VM_OBJECT_WOWNED(object))
            VM_OBJECT_WLOCK(object);
        endpindex = m[count - 1]->pindex + rahead + 1;
        if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
            p->pindex < endpindex)
            endpindex = p->pindex;
        if (endpindex > object->size)
            endpindex = object->size;

        for (tpindex = m[count - 1]->pindex + 1;
            tpindex < endpindex; i++, tpindex++) {
            p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
            if (p == NULL)
                break;
            bp->b_pages[i] = p;
        }

        bp->b_pgafter = i - bp->b_npages;
        bp->b_npages = i;
    } else
        bp->b_pgafter = 0;

    if (VM_OBJECT_WOWNED(object))
        VM_OBJECT_WUNLOCK(object);

    /* Report back the actual behind/ahead read. */
    if (a_rbehind)
        *a_rbehind = bp->b_pgbefore;
    if (a_rahead)
        *a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
    KASSERT(bp->b_npages <= nitems(bp->b_pages),
        ("%s: buf %p overflowed", __func__, bp));
    for (int j = 1; j < bp->b_npages; j++)
        KASSERT(bp->b_pages[j]->pindex - 1 ==
            bp->b_pages[j - 1]->pindex,
            ("%s: pages array not consecutive, bp %p", __func__, bp));
#endif

    /*
     * Recalculate the first offset and bytecount with regard to read
     * behind.  Truncate bytecount to the vnode's real size and round
     * up the physical size for real devices.
     */
    foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
    bytecount = bp->b_npages << PAGE_SHIFT;
    if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
        bytecount = object->un_pager.vnp.vnp_size - foff;
    secmask = bo->bo_bsize - 1;
    KASSERT(secmask < PAGE_SIZE && secmask > 0,
        ("%s: sector size %d too large", __func__, secmask + 1));
    bytecount = (bytecount + secmask) & ~secmask;

    /*
     * And map the pages to be read into the kva, if the filesystem
     * requires mapped buffers.
     */
    if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
        unmapped_buf_allowed) {
        bp->b_data = unmapped_buf;
        bp->b_offset = 0;
    } else {
        bp->b_data = bp->b_kvabase;
        pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
    }

    /* Build a minimal buffer header. */
    bp->b_iocmd = BIO_READ;
    KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
    KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
    bp->b_rcred = crhold(curthread->td_ucred);
    bp->b_wcred = crhold(curthread->td_ucred);
    pbgetbo(bo, bp);
    bp->b_vp = vp;
    bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
    bp->b_iooffset = dbtob(bp->b_blkno);
    KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
        (blkno0 - bp->b_blkno) * DEV_BSIZE +
        IDX_TO_OFF(m[0]->pindex) % bsize,
        ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
        "blkno0 %ju b_blkno %ju", bsize,
        (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
        (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

    atomic_add_long(&runningbufspace, bp->b_runningbufspace);
    PCPU_INC(cnt.v_vnodein);
    PCPU_ADD(cnt.v_vnodepgsin, bp->b_npages);

    if (iodone != NULL) { /* async */
        bp->b_pgiodone = iodone;
        bp->b_caller1 = arg;
        bp->b_iodone = vnode_pager_generic_getpages_done_async;
        bp->b_flags |= B_ASYNC;
        BUF_KERNPROC(bp);
        bstrategy(bp);
        return (VM_PAGER_OK);
    } else {
        bp->b_iodone = bdone;
        bstrategy(bp);
        bwait(bp, PVM, "vnread");
        error = vnode_pager_generic_getpages_done(bp);
        for (i = 0; i < bp->b_npages; i++)
            bp->b_pages[i] = NULL;
        bp->b_vp = NULL;
        pbrelbo(bp);
        relpbuf(bp, &vnode_pbuf_freecnt);
        return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
    }
}
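/*
 * The final size computation above rounds bytecount up to the device
 * sector size with "(bytecount + secmask) & ~secmask", which requires
 * bo_bsize to be a power of two (the KASSERT bounds secmask below
 * PAGE_SIZE).  A standalone sketch of the idiom, with illustrative
 * values:
 */
#include <assert.h>
#include <stdio.h>

static unsigned
roundup_pow2(unsigned len, unsigned secsize)
{
    unsigned secmask = secsize - 1;

    assert((secsize & secmask) == 0);   /* power of two required */
    return ((len + secmask) & ~secmask);
}

int
main(void)
{
    printf("%u\n", roundup_pow2(1000, 512));    /* prints 1024 */
    printf("%u\n", roundup_pow2(1024, 512));    /* prints 1024 */
    return 0;
}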
/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
    struct vnode *vp;
    struct bufobj *bo;
    struct buf *bp;
    struct sf_buf *sf;
    daddr_t fileaddr;
    vm_offset_t bsize;
    vm_page_bits_t bits;
    int error, i;

    error = 0;
    vp = object->handle;
    if (vp->v_iflag & VI_DOOMED)
        return VM_PAGER_BAD;

    bsize = vp->v_mount->mnt_stat.f_iosize;

    VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

    sf = sf_buf_alloc(m, 0);

    for (i = 0; i < PAGE_SIZE / bsize; i++) {
        vm_ooffset_t address;

        bits = vm_page_bits(i * bsize, bsize);
        if (m->valid & bits)
            continue;

        address = IDX_TO_OFF(m->pindex) + i * bsize;
        if (address >= object->un_pager.vnp.vnp_size) {
            fileaddr = -1;
        } else {
            error = vnode_pager_addr(vp, address, &fileaddr, NULL);
            if (error)
                break;
        }
        if (fileaddr != -1) {
            bp = getpbuf(&vnode_pbuf_freecnt);

            /* build a minimal buffer header */
            bp->b_iocmd = BIO_READ;
            bp->b_iodone = bdone;
            KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
            KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
            bp->b_rcred = crhold(curthread->td_ucred);
            bp->b_wcred = crhold(curthread->td_ucred);
            bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
            bp->b_blkno = fileaddr;
            pbgetbo(bo, bp);
            bp->b_vp = vp;
            bp->b_bcount = bsize;
            bp->b_bufsize = bsize;
            bp->b_runningbufspace = bp->b_bufsize;
            atomic_add_long(&runningbufspace, bp->b_runningbufspace);

            /* do the input */
            bp->b_iooffset = dbtob(bp->b_blkno);
            bstrategy(bp);

            bwait(bp, PVM, "vnsrd");

            if ((bp->b_ioflags & BIO_ERROR) != 0)
                error = EIO;

            /*
             * free the buffer header back to the swap buffer pool
             */
            bp->b_vp = NULL;
            pbrelbo(bp);
            relpbuf(bp, &vnode_pbuf_freecnt);
            if (error)
                break;
        } else
            bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
        KASSERT((m->dirty & bits) == 0,
            ("vnode_pager_input_smlfs: page %p is dirty", m));
        VM_OBJECT_WLOCK(object);
        m->valid |= bits;
        VM_OBJECT_WUNLOCK(object);
    }
    sf_buf_free(sf);
    if (error) {
        return VM_PAGER_ERROR;
    }
    return VM_PAGER_OK;
}
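/*
 * The loop above tracks sub-page validity with vm_page_bits(), where
 * each bit of the page's valid mask covers one DEV_BSIZE chunk.  The
 * following standalone sketch re-derives that mapping for exposition;
 * it is not the kernel implementation.
 */
#include <stdio.h>

#define SK_DEV_BSIZE 512    /* illustrative chunk size */

static unsigned
page_bits_sketch(int base, int size)
{
    int first = base / SK_DEV_BSIZE;                /* first chunk touched */
    int last = (base + size - 1) / SK_DEV_BSIZE;    /* last chunk touched */

    /* set bits [first, last], clear everything else */
    return (((2u << last) - 1) & ~((1u << first) - 1));
}

int
main(void)
{
    /* Bits for the second 1 KiB of a 4 KiB page: chunks 2-3 -> 0xc. */
    printf("0x%x\n", page_bits_sketch(1024, 1024));
    return 0;
}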
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
    int i;
    int error;
    caddr_t sa;
    u_int iolen;
    struct buf *bp;

    /* Keep the process UPAGES from being swapped.  XXX: why? */
    PHOLD(curproc);

    bp = getpbuf(NULL);
    sa = bp->b_data;
    error = 0;

    /* XXX: sanity check */
    if (dev->si_iosize_max < PAGE_SIZE) {
        printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
            devtoname(dev), dev->si_iosize_max);
        dev->si_iosize_max = DFLTPHYS;
    }

    for (i = 0; i < uio->uio_iovcnt; i++) {
        while (uio->uio_iov[i].iov_len) {
            bp->b_flags = 0;
            if (uio->uio_rw == UIO_READ) {
                bp->b_iocmd = BIO_READ;
                curthread->td_ru.ru_inblock++;
            } else {
                bp->b_iocmd = BIO_WRITE;
                curthread->td_ru.ru_oublock++;
            }
            bp->b_iodone = bdone;
            bp->b_data = uio->uio_iov[i].iov_base;
            bp->b_bcount = uio->uio_iov[i].iov_len;
            bp->b_offset = uio->uio_offset;
            bp->b_iooffset = uio->uio_offset;
            bp->b_saveaddr = sa;

            /* Don't exceed the driver's iosize limit */
            if (bp->b_bcount > dev->si_iosize_max)
                bp->b_bcount = dev->si_iosize_max;

            /*
             * Make sure the pbuf can map the request
             * XXX: The pbuf has kvasize = MAXPHYS so a request
             * XXX: larger than MAXPHYS - PAGE_SIZE must be
             * XXX: page aligned or it will be fragmented.
             */
            iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
            if ((bp->b_bcount + iolen) > bp->b_kvasize) {
                bp->b_bcount = bp->b_kvasize;
                if (iolen != 0)
                    bp->b_bcount -= PAGE_SIZE;
            }
            bp->b_bufsize = bp->b_bcount;

            bp->b_blkno = btodb(bp->b_offset);

            if (uio->uio_segflg == UIO_USERSPACE)
                if (vmapbuf(bp) < 0) {
                    error = EFAULT;
                    goto doerror;
                }

            dev_strategy(dev, bp);
            if (uio->uio_rw == UIO_READ)
                bwait(bp, PRIBIO, "physrd");
            else
                bwait(bp, PRIBIO, "physwr");

            if (uio->uio_segflg == UIO_USERSPACE)
                vunmapbuf(bp);

            iolen = bp->b_bcount - bp->b_resid;
            if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
                goto doerror;   /* EOF */
            uio->uio_iov[i].iov_len -= iolen;
            uio->uio_iov[i].iov_base =
                (char *)uio->uio_iov[i].iov_base + iolen;
            uio->uio_resid -= iolen;
            uio->uio_offset += iolen;
            if (bp->b_ioflags & BIO_ERROR) {
                error = bp->b_error;
                goto doerror;
            }
        }
    }
doerror:
    relpbuf(bp, NULL);
    PRELE(curproc);
    return (error);
}
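/*
 * The clamp above ensures the transfer fits in the pbuf's kva window:
 * a buffer starting at a non-zero offset within its first page needs
 * (offset + length) bytes of mapping.  A standalone sketch under
 * made-up sizes (kvasize plays the role of the MAXPHYS-sized pbuf
 * mapping):
 */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SIZE 4096u
#define SK_PAGE_MASK (SK_PAGE_SIZE - 1)

static size_t
clamp_transfer(uintptr_t data, size_t bcount, size_t kvasize)
{
    size_t off = data & SK_PAGE_MASK;   /* offset within the first page */

    if (bcount + off > kvasize) {
        bcount = kvasize;
        if (off != 0)
            bcount -= SK_PAGE_SIZE;     /* preserve page alignment */
    }
    return (bcount);
}

int
main(void)
{
    /* 128 KiB window, request starting 100 bytes into a page. */
    printf("%zu\n", clamp_transfer(0x10064, 200000, 131072));   /* 126976 */
    return 0;
}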
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct buf *bp;
    struct cdevsw *csw;
    caddr_t sa;
    u_int iolen;
    int error, i, mapped;

    /* Keep the process UPAGES from being swapped.  XXX: why? */
    PHOLD(curproc);

    bp = getpbuf(NULL);
    sa = bp->b_data;
    error = 0;

    /* XXX: sanity check */
    if (dev->si_iosize_max < PAGE_SIZE) {
        printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
            devtoname(dev), dev->si_iosize_max);
        dev->si_iosize_max = DFLTPHYS;
    }

    /*
     * If the driver does not want I/O to be split, that means that we
     * need to reject any requests that will not fit into one buffer.
     */
    if (dev->si_flags & SI_NOSPLIT &&
        (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
        uio->uio_iovcnt > 1)) {
        /*
         * Tell the user why the I/O was rejected.
         */
        if (uio->uio_resid > dev->si_iosize_max)
            uprintf("%s: request size=%zd > si_iosize_max=%d; "
                "cannot split request\n", devtoname(dev),
                uio->uio_resid, dev->si_iosize_max);
        if (uio->uio_resid > MAXPHYS)
            uprintf("%s: request size=%zd > MAXPHYS=%d; "
                "cannot split request\n", devtoname(dev),
                uio->uio_resid, MAXPHYS);
        if (uio->uio_iovcnt > 1)
            uprintf("%s: request vectors=%d > 1; "
                "cannot split request\n", devtoname(dev),
                uio->uio_iovcnt);
        error = EFBIG;
        goto doerror;
    }

    for (i = 0; i < uio->uio_iovcnt; i++) {
        while (uio->uio_iov[i].iov_len) {
            bp->b_flags = 0;
            if (uio->uio_rw == UIO_READ) {
                bp->b_iocmd = BIO_READ;
                curthread->td_ru.ru_inblock++;
            } else {
                bp->b_iocmd = BIO_WRITE;
                curthread->td_ru.ru_oublock++;
            }
            bp->b_iodone = bdone;
            bp->b_data = uio->uio_iov[i].iov_base;
            bp->b_bcount = uio->uio_iov[i].iov_len;
            bp->b_offset = uio->uio_offset;
            bp->b_iooffset = uio->uio_offset;
            bp->b_saveaddr = sa;

            /* Don't exceed the driver's iosize limit */
            if (bp->b_bcount > dev->si_iosize_max)
                bp->b_bcount = dev->si_iosize_max;

            /*
             * Make sure the pbuf can map the request
             * XXX: The pbuf has kvasize = MAXPHYS so a request
             * XXX: larger than MAXPHYS - PAGE_SIZE must be
             * XXX: page aligned or it will be fragmented.
             */
            iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
            if ((bp->b_bcount + iolen) > bp->b_kvasize) {
                /*
                 * This device does not want I/O to be split.
                 */
                if (dev->si_flags & SI_NOSPLIT) {
                    uprintf("%s: request ptr %p is not "
                        "on a page boundary; cannot split "
                        "request\n", devtoname(dev),
                        bp->b_data);
                    error = EFBIG;
                    goto doerror;
                }
                bp->b_bcount = bp->b_kvasize;
                if (iolen != 0)
                    bp->b_bcount -= PAGE_SIZE;
            }
            bp->b_bufsize = bp->b_bcount;

            bp->b_blkno = btodb(bp->b_offset);
            csw = dev->si_devsw;
            if (uio->uio_segflg == UIO_USERSPACE) {
                if (dev->si_flags & SI_UNMAPPED)
                    mapped = 0;
                else
                    mapped = 1;
                if (vmapbuf(bp, mapped) < 0) {
                    error = EFAULT;
                    goto doerror;
                }
            }

            dev_strategy_csw(dev, csw, bp);
            if (uio->uio_rw == UIO_READ)
                bwait(bp, PRIBIO, "physrd");
            else
                bwait(bp, PRIBIO, "physwr");

            if (uio->uio_segflg == UIO_USERSPACE)
                vunmapbuf(bp);

            iolen = bp->b_bcount - bp->b_resid;
            if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
                goto doerror;   /* EOF */
            uio->uio_iov[i].iov_len -= iolen;
            uio->uio_iov[i].iov_base =
                (char *)uio->uio_iov[i].iov_base + iolen;
            uio->uio_resid -= iolen;
            uio->uio_offset += iolen;
            if (bp->b_ioflags & BIO_ERROR) {
                error = bp->b_error;
                goto doerror;
            }
        }
    }
doerror:
    relpbuf(bp, NULL);
    PRELE(curproc);
    return (error);
}
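/*
 * The SI_NOSPLIT path above rejects, with EFBIG, any request that
 * cannot be carried by a single buffer.  A standalone sketch of that
 * admission predicate, with an illustrative MAXPHYS; the real values
 * come from the kernel configuration:
 */
#include <stdbool.h>
#include <stdio.h>

#define SK_MAXPHYS (128 * 1024)     /* illustrative transfer cap */

static bool
fits_in_one_buf(long resid, int iovcnt, int iosize_max)
{
    return (resid <= iosize_max && resid <= SK_MAXPHYS && iovcnt <= 1);
}

int
main(void)
{
    printf("%d\n", fits_in_one_buf(65536, 1, 131072));  /* 1: accepted */
    printf("%d\n", fits_in_one_buf(65536, 2, 131072));  /* 0: rejected */
    return 0;
}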
buffer *
getblk(int fs, ino_t inode, int block)
{
    debug(1 + BUFFER_DL, "getblk(fs:%d, inode:%d, block:%d) {\n",
        fs, (int)inode, block);

    // wait for the semaphore to start
    while (!semWait(semid)) {
        // skip signal interruptions
        if (errno != EINTR) {
            perror("getblk, semWait");
            debug(2 + BUFFER_DL, "ERROR in getblk, semWait.");
            exit(1);
        }
    }

    // search for the specified buffer or return a new one
    buffer *buff = NULL;
    while (buff == NULL) {
        // search the hash list for the block
        int pos = bc->header.hash[block % MAXHASH];
        buffer *hl_buff = (pos > -1) ? &(bc->buffers[pos]) : NULL;
        int times = 0;
        while (pos > -1 &&
            (hl_buff->inode != inode || hl_buff->block != block)) {
            times++;
            if (times == MAXBUFF) {
                printf("[%d] times: %d ... \n", getpid(), times);
                int end = pos;
                do {
                    bdebug(0, &(bc->buffers[pos]));
                    pos = bc->buffers[pos].hashnext;
                } while (end != pos);
                exit(0);
            }
            pos = hl_buff->hashnext;
            hl_buff = (pos > -1) ? &(bc->buffers[pos]) : NULL;
        }

        // return the buffer if it is already loaded
        if (hl_buff != NULL && hl_buff->inode == inode &&
            hl_buff->block == block) {
            // sleep until the targeted buffer becomes free
            if (BS_CMP(hl_buff->status, BS_LOCKED)) {
                bwait(hl_buff);
                continue;
            }

            // take ownership of the buffer
            hl_buff->fd = getfd(inode);
            hl_buff->pid = getpid();

            // lock the found buffer
            BS_SET(hl_buff->status, BS_LOCKED);

            // will return the found buffer
            buff = hl_buff;

        // allocate the block in a new buffer
        } else {
            buffer *new_buff = getfree();

            // sleep until any buffer becomes free
            if (new_buff == NULL) {
                bwait(NULL);
                continue;
            }

            // asynchronously write the buffer to disk
            while (BS_CMP(new_buff->status, BS_DELAYED)) {
                basyncwrite(new_buff);
                // wait for the free buffer to be written
                usleep(1000);
            }

            // set the new buffer's properties and proper hash list
            drophash(new_buff);
            new_buff->valid = 0;
            new_buff->inode = inode;
            new_buff->block = block;
            new_buff->fd = getfd(inode);
            new_buff->pid = getpid();
            rehash(new_buff);

            // lock the new buffer and mark it as not valid
            BS_SET(new_buff->status, BS_LOCKED);
            BS_CLR(new_buff->status, BS_VALID);

            // will return the new buffer
            buff = new_buff;
        }
    }

    // extract the buffer from the free list
    setused(buff);

    // release the semaphore
    if (!semSignal(semid)) {
        perror("getblk, semSignal");
        debug(2 + BUFFER_DL, "ERROR in getblk, semSignal.");
        exit(1);
    }

    bdebug(2 + BUFFER_DL, buff);
    debug(1 + BUFFER_DL, "} -> %p\n", buff);
    return buff;
}
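/*
 * getblk() above manipulates buffer state through BS_SET/BS_CLR/BS_CMP.
 * Their definitions are not part of this excerpt; a plausible sketch,
 * assuming the status field is a plain bitmask (the project's actual
 * definitions may differ):
 */
#define BS_LOCKED   (1u << 0)   /* buffer is owned by a process */
#define BS_VALID    (1u << 1)   /* contents match the disk block */
#define BS_DELAYED  (1u << 2)   /* dirty; must be written back first */

#define BS_SET(st, f)   ((st) |= (f))
#define BS_CLR(st, f)   ((st) &= ~(f))
#define BS_CMP(st, f)   (((st) & (f)) != 0)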
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
    int reqpage, vop_getpages_iodone_t iodone, void *arg)
{
    vm_object_t object;
    struct bufobj *bo;
    struct buf *bp;
    daddr_t firstaddr, reqblock;
    off_t foff, pib;
    int pbefore, pafter, i, size, bsize, first, last, *freecnt;
    int count, error, before, after, secmask;

    KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
        ("vnode_pager_generic_getpages does not support devices"));
    if (vp->v_iflag & VI_DOOMED)
        return (VM_PAGER_BAD);

    object = vp->v_object;
    count = bytecount / PAGE_SIZE;
    bsize = vp->v_mount->mnt_stat.f_iosize;

    /*
     * Synchronous and asynchronous paging operations use different
     * free pbuf counters.  This is done to keep asynchronous requests
     * from consuming all pbufs.
     * Allocate the pbuf at the very beginning of the function, so that
     * if we are low on a certain kind of pbuf we don't even proceed to
     * BMAP, but sleep.
     */
    freecnt = iodone != NULL ?
        &vnode_async_pbuf_freecnt : &vnode_pbuf_freecnt;
    bp = getpbuf(freecnt);

    /*
     * Get the underlying device blocks for the file with VOP_BMAP().
     * If the file system doesn't support VOP_BMAP, use the old way of
     * getting pages via VOP_READ.
     */
    error = VOP_BMAP(vp, IDX_TO_OFF(m[reqpage]->pindex) / bsize, &bo,
        &reqblock, &after, &before);
    if (error == EOPNOTSUPP) {
        relpbuf(bp, freecnt);
        VM_OBJECT_WLOCK(object);
        for (i = 0; i < count; i++)
            if (i != reqpage) {
                vm_page_lock(m[i]);
                vm_page_free(m[i]);
                vm_page_unlock(m[i]);
            }
        PCPU_INC(cnt.v_vnodein);
        PCPU_INC(cnt.v_vnodepgsin);
        error = vnode_pager_input_old(object, m[reqpage]);
        VM_OBJECT_WUNLOCK(object);
        return (error);
    } else if (error != 0) {
        relpbuf(bp, freecnt);
        vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
        return (VM_PAGER_ERROR);

    /*
     * If the blocksize is smaller than a page size, then use
     * special small filesystem code.
     */
    } else if ((PAGE_SIZE / bsize) > 1) {
        relpbuf(bp, freecnt);
        vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
        PCPU_INC(cnt.v_vnodein);
        PCPU_INC(cnt.v_vnodepgsin);
        return (vnode_pager_input_smlfs(object, m[reqpage]));
    }

    /*
     * Since the caller has busied the requested page, that page's valid
     * field will not be changed by other threads.
     */
    vm_page_assert_xbusied(m[reqpage]);

    /*
     * If we have a completely valid page available to us, we can
     * clean up and return.  Otherwise we have to re-read the
     * media.
     */
    if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
        relpbuf(bp, freecnt);
        vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
        return (VM_PAGER_OK);
    } else if (reqblock == -1) {
        relpbuf(bp, freecnt);
        pmap_zero_page(m[reqpage]);
        KASSERT(m[reqpage]->dirty == 0,
            ("vnode_pager_generic_getpages: page %p is dirty", m));
        VM_OBJECT_WLOCK(object);
        m[reqpage]->valid = VM_PAGE_BITS_ALL;
        vm_pager_free_nonreq(object, m, reqpage, count, TRUE);
        VM_OBJECT_WUNLOCK(object);
        return (VM_PAGER_OK);
    } else if (m[reqpage]->valid != 0) {
        VM_OBJECT_WLOCK(object);
        m[reqpage]->valid = 0;
        VM_OBJECT_WUNLOCK(object);
    }

    pib = IDX_TO_OFF(m[reqpage]->pindex) % bsize;
    pbefore = ((daddr_t)before * bsize + pib) / PAGE_SIZE;
    pafter = ((daddr_t)(after + 1) * bsize - pib) / PAGE_SIZE - 1;
    first = reqpage < pbefore ? 0 : reqpage - pbefore;
    last = reqpage + pafter >= count ? count - 1 : reqpage + pafter;
    if (first > 0 || last + 1 < count) {
        VM_OBJECT_WLOCK(object);
        for (i = 0; i < first; i++) {
            vm_page_lock(m[i]);
            vm_page_free(m[i]);
            vm_page_unlock(m[i]);
        }
        for (i = last + 1; i < count; i++) {
            vm_page_lock(m[i]);
            vm_page_free(m[i]);
            vm_page_unlock(m[i]);
        }
        VM_OBJECT_WUNLOCK(object);
    }

    /*
     * here on direct device I/O
     */
    firstaddr = reqblock;
    firstaddr += pib / DEV_BSIZE;
    firstaddr -= IDX_TO_OFF(reqpage - first) / DEV_BSIZE;

    /*
     * The first and last page have been calculated now, move
     * input pages to be zero based, and adjust the count.
     */
    m += first;
    reqpage -= first;
    count = last - first + 1;

    /*
     * calculate the file virtual address for the transfer
     */
    foff = IDX_TO_OFF(m[0]->pindex);

    /*
     * calculate the size of the transfer
     */
    size = count * PAGE_SIZE;
    KASSERT(count > 0, ("zero count"));
    if ((foff + size) > object->un_pager.vnp.vnp_size)
        size = object->un_pager.vnp.vnp_size - foff;
    KASSERT(size > 0, ("zero size"));

    /*
     * round up physical size for real devices.
     */
    secmask = bo->bo_bsize - 1;
    KASSERT(secmask < PAGE_SIZE && secmask > 0,
        ("vnode_pager_generic_getpages: sector size %d too large",
        secmask + 1));
    size = (size + secmask) & ~secmask;

    /*
     * and map the pages to be read into the kva, if the filesystem
     * requires mapped buffers.
     */
    if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
        unmapped_buf_allowed) {
        bp->b_data = unmapped_buf;
        bp->b_offset = 0;
    } else {
        bp->b_data = bp->b_kvabase;
        pmap_qenter((vm_offset_t)bp->b_data, m, count);
    }

    /* build a minimal buffer header */
    bp->b_iocmd = BIO_READ;
    KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
    KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
    bp->b_rcred = crhold(curthread->td_ucred);
    bp->b_wcred = crhold(curthread->td_ucred);
    bp->b_blkno = firstaddr;
    pbgetbo(bo, bp);
    bp->b_vp = vp;
    bp->b_bcount = size;
    bp->b_bufsize = size;
    bp->b_runningbufspace = bp->b_bufsize;
    for (i = 0; i < count; i++)
        bp->b_pages[i] = m[i];
    bp->b_npages = count;
    bp->b_pager.pg_reqpage = reqpage;
    atomic_add_long(&runningbufspace, bp->b_runningbufspace);

    PCPU_INC(cnt.v_vnodein);
    PCPU_ADD(cnt.v_vnodepgsin, count);

    /* do the input */
    bp->b_iooffset = dbtob(bp->b_blkno);
    if (iodone != NULL) { /* async */
        bp->b_pager.pg_iodone = iodone;
        bp->b_caller1 = arg;
        bp->b_iodone = vnode_pager_generic_getpages_done_async;
        bp->b_flags |= B_ASYNC;
        BUF_KERNPROC(bp);
        bstrategy(bp);
        /* Good bye! */
    } else {
        bp->b_iodone = bdone;
        bstrategy(bp);
        bwait(bp, PVM, "vnread");
        error = vnode_pager_generic_getpages_done(bp);
        for (i = 0; i < bp->b_npages; i++)
            bp->b_pages[i] = NULL;
        bp->b_vp = NULL;
        pbrelbo(bp);
        relpbuf(bp, &vnode_pbuf_freecnt);
    }
    return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
}
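/*
 * To make the pib/pbefore/pafter arithmetic above concrete: VOP_BMAP
 * reports contiguity in fs blocks, and the code converts that to whole
 * pages available around the requested page.  A standalone worked
 * example with illustrative sizes (16 KiB fs blocks, 4 KiB pages):
 */
#include <stdio.h>

#define SK_PAGE_SIZE 4096L
#define SK_BSIZE 16384L     /* illustrative f_iosize */

int
main(void)
{
    long pib = 8192;                /* page starts 2 pages into its block */
    long before = 1, after = 2;     /* contiguous fs blocks around it */
    long pbefore, pafter;

    pbefore = (before * SK_BSIZE + pib) / SK_PAGE_SIZE;
    pafter = ((after + 1) * SK_BSIZE - pib) / SK_PAGE_SIZE - 1;
    printf("pbefore=%ld pafter=%ld\n", pbefore, pafter);    /* 6 and 9 */
    return 0;
}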