/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to insure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(register struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp, *bh;
	int x;

	for (;;) {
		/* First see whether the block is already cached. */
		if (bp = incore(vp, blkno)) {
			/* Block bio interrupts while we examine/modify flags. */
			x = splbio();
			if (bp->b_flags & B_BUSY) {
				/*
				 * Someone else owns the buffer: note that we
				 * want it, sleep until it is released, then
				 * restart the lookup from scratch (the buffer
				 * may have been reassigned while we slept).
				 */
				bp->b_flags |= B_WANTED;
				sleep (bp, PRIBIO);
				splx(x);
				continue;
			}
			/* Claim the cached buffer and pull it off its free list. */
			bp->b_flags |= B_BUSY | B_CACHE;
			bremfree(bp);
			/* XXX: a cached buffer cannot be grown here. */
			if (size > bp->b_bufsize)
				panic("now what do we do?");
			/* if (bp->b_bufsize != size) allocbuf(bp, size); */
		} else {
			/* Not cached: get a fresh buffer of the right size. */
			if((bp = getnewbuf(size)) == 0)
				continue;	/* none available; try again */
			bp->b_blkno = bp->b_lblkno = blkno;
			/* Associate the buffer with the vnode ... */
			bgetvp(vp, bp);
			x = splbio();
			/* ... and enter it on its hash chain. */
			bh = BUFHASH(vp, blkno);
			binshash(bp, bh);
			bp->b_flags = B_BUSY;
		}
		/* x was set by splbio() in whichever branch ran above. */
		splx(x);
		return (bp);
	}
}
/*
 * Return a buffer of the requested size that is not associated
 * with any vnode; the caller owns it and must release it with
 * brelse() when done.
 */
struct buf *
geteblk(int size)
{
	struct buf *ebp;

	getnewbuf(0, 0, &ebp);
	SET(ebp->b_flags, B_INVAL);	/* contents are not valid cached data */
	binshash(ebp, &invalhash);	/* keep it with the other invalid buffers */
	allocbuf(ebp, size);		/* give it the requested amount of memory */
	return (ebp);
}
/*
 * Return an empty buffer of the given size that is not associated
 * with any vnode or device.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	/* Keep retrying until a buffer of the right size is available. */
	do {
		bp = getnewbuf(size);
	} while (bp == 0);
	s = splbio();
	/* Park it on the AGE queue's hash chain. */
	binshash(bp, bfreelist + BQ_AGE);
	splx(s);
	return (bp);
}
/*
 * Write to the temporary (swap-backed) disk device.  The request is
 * broken into DEV_BSIZE-sized pieces; each piece is copied from the
 * caller via uiomove() and pushed out to swap.  Returns 0 on success
 * or ENODEV/EIO on a bad unit, missing allocation, or out-of-range
 * offset.
 */
int swcwrite(dev_t dev, register struct uio *uio, int flag)
{
	unsigned int block;	/* current 1K block within the allocation */
	unsigned int boff;	/* byte offset within the current block */
	struct buf *bp;
	unsigned int rsize;	/* bytes to transfer this iteration */
	unsigned int rlen;	/* bytes remaining in the request */
	int unit = minor(dev);

	if (unit >= NTMP) {
		printf("temp%d: Device number out of range\n", minor(dev));
		return ENODEV;
	}
	if (tdstart[unit] == 0) {
		printf("temp%d: attempt to write with no allocation\n", unit);
		return EIO;
	}
	if (uio->uio_offset >= tdsize[unit] << 10) {
		printf("temp%d: attempt to write past end of allocation\n", unit);
		return EIO;
	}
	bp = getnewbuf();
	if (bp == 0)
		return EIO;	/* no transfer buffer available */
	block = uio->uio_offset >> 10;
	boff = uio->uio_offset - (block << 10);
	rsize = DEV_BSIZE - boff;
	rlen = uio->uio_iov->iov_len;
	/*
	 * The first chunk may be smaller than the space left in the
	 * block; without this clamp the unsigned 'rlen -= rsize' below
	 * underflows when the request ends inside the first block and
	 * the loop runs away to the end of the allocation.
	 */
	if (rsize > rlen)
		rsize = rlen;
	while (rlen > 0 && block < tdsize[unit]) {
		/*
		 * For a partial-block write, fetch the existing block
		 * first so the bytes outside the written range are
		 * preserved instead of being overwritten with whatever
		 * stale data the transfer buffer happened to contain.
		 */
		if (boff != 0 || rsize < DEV_BSIZE)
			swap(tdstart[unit] + block, (size_t)bp->b_addr, DEV_BSIZE, B_READ);
		uiomove(bp->b_addr+boff, rsize, uio);
		swap(tdstart[unit] + block, (size_t)bp->b_addr, DEV_BSIZE, B_WRITE);
		boff = 0;
		block++;
		rlen -= rsize;
		rsize = rlen >= DEV_BSIZE ? DEV_BSIZE : rlen;
	}
	brelse(bp);
	return 0;
}
/*
 * Read the master boot record (first 512 bytes) of the disk selected
 * by 'root'.  Returns a buffer holding the MBR on success, or NULL on
 * a bad device index, buffer shortage, or read failure.  The caller
 * owns the returned buffer and must brelse() it.
 */
static inline struct buf *read_mbr(int root)
{
	int rv;
	int unit;
	struct buf *bp;

	/* NOTE(review): '>' assumes MAXDEV is the largest valid index,
	 * not the size of disks[] — confirm against the declaration. */
	if (root > MAXDEV)
		return NULL;
	unit = disks[root].unit;
	bp = getnewbuf();
	if (bp == NULL)
		return NULL;	/* no buffer available */
	DEBUG8("rd%d: read mbr from device %d\n", root, unit);
	rv = disks[root].read(unit, 0, bp->b_addr, 512);
	if (rv == 0) {
		DEBUG8("rd%d: mbr read FAIL\n", root);
		/* was 'brelse(buf)': undefined identifier, leaked bp */
		brelse(bp);
		return NULL;
	}
	DEBUG8("rd%d: mbr read OK\n", root);
	return bp;
}
/*
 * Read from the temporary (swap-backed) disk device.  Each DEV_BSIZE
 * block is pulled in from swap and the requested byte range is copied
 * out to the caller via uiomove().  Returns 0 on success or
 * ENODEV/EIO on a bad unit, missing allocation, or out-of-range
 * offset.
 */
int swcread(dev_t dev, register struct uio *uio, int flag)
{
	unsigned int block;	/* current 1K block within the allocation */
	unsigned int boff;	/* byte offset within the current block */
	struct buf *bp;
	unsigned int rsize;	/* bytes to transfer this iteration */
	unsigned int rlen;	/* bytes remaining in the request */
	int unit = minor(dev);

	if (unit >= NTMP) {
		printf("temp%d: Device number out of range\n", minor(dev));
		return ENODEV;
	}
	if (tdstart[unit] == 0)
		return EIO;
	if (uio->uio_offset >= tdsize[unit] << 10)
		return EIO;
	bp = getnewbuf();
	if (bp == 0)
		return EIO;	/* no transfer buffer available */
	block = uio->uio_offset >> 10;
	boff = uio->uio_offset - (block << 10);
	rsize = DEV_BSIZE - boff;
	rlen = uio->uio_iov->iov_len;
	/*
	 * The first chunk may be smaller than the space left in the
	 * block; without this clamp the unsigned 'rlen -= rsize' below
	 * underflows when the request ends inside the first block and
	 * the loop runs away to the end of the allocation.
	 */
	if (rsize > rlen)
		rsize = rlen;
	while((rlen > 0) && (block < tdsize[unit])) {
		swap(tdstart[unit] + block, (size_t)bp->b_addr, DEV_BSIZE, B_READ);
		uiomove(bp->b_addr+boff, rsize, uio);
		boff = 0;
		block++;
		rlen -= rsize;
		rsize = rlen >= DEV_BSIZE ? DEV_BSIZE : rlen;
	}
	brelse(bp);
	return 0;
}
struct buf *prepartition_device(char *devname) { #ifndef PARTITION char *prepartition_schema = ""; #else char *prepartition_schema = (char *)QUOTE(PARTITION); #endif char *p,*q; struct buf *bp; struct mbr *mbr; int pnum = 0; int start = 2; char dev[9]; char size[9]; char type[5]; int pos = 0; p = prepartition_schema+1; q = p; printf("PP Schema: %s\n",prepartition_schema); bp = getnewbuf(); if(!bp) { return NULL; } mbr = (struct mbr *)bp->b_addr; while(*p) { while(*q && *q != ' ') q++; if(*q == ' ') { *q = 0; q++; } pos = 0; while((*p) && (*p != ':')) { dev[pos++] = *p; dev[pos] = 0; p++; } if((*p) == ':') { p++; } else { printf("Device Format Error (%c)\n",*p); brelse(bp); return NULL; } while((*p) && (*p != ' ')) { pos = 0; while((*p) && (*p != '@')) { type[pos++] = *p; type[pos] = 0; p++; } if((*p) == '@') { p++; } else { printf("Type Format Error\n"); brelse(bp); return NULL; } pos = 0; while((*p) && (*p != ',') && (*p != ' ') && (*p != ')')) { size[pos++] = *p; size[pos] = 0; p++; } printf("Found partition on %s of type %s and size %s\n", dev,type,size); if(!strcmp(dev,devname)) { if(!strcmp("sw",type)) mbr->partitions[pnum].type=RDISK_SWAP; if(!strcmp("sa",type)) { mbr->partitions[pnum].type=RDISK_SWAP; mbr->partitions[pnum].status = 0x80; } if(!strcmp("fs",type)) mbr->partitions[pnum].type=RDISK_FS; mbr->partitions[pnum].lbastart = start; mbr->partitions[pnum].lbalength = atoi(size)<<1; start += mbr->partitions[pnum].lbalength; pnum++; } p++; } } if(pnum > 0) { mbr->bootsig = 0xAA55; mbr->biosdrive = 0x80; mbr->sig = 'R'<<24 | 'T'<<16 | 'E'<<8 | 'R'; return bp; } brelse(bp); return NULL; }
/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the callers
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size)
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	/* Memory is managed in whole pages. */
	desired_size = round_page(size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		getnewbuf(0, 0, &nbp);
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = MIN(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		/* take pages from the victim's tail, append to bp's tail */
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		/* victim goes back to the free lists (possibly now empty) */
		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size, nbp->b_data,
		    bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	/* b_bcount reflects the caller's requested (unrounded) size. */
	bp->b_bcount = size;
}
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to insure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct bufhashhdr *bh;
	struct buf *bp, *nbp = NULL;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			/*
			 * Buffer is owned by someone else: wait for it
			 * to be released, then rescan the hash chain.
			 * A signal or timeout (err != 0) aborts the call.
			 */
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			/* Valid cache hit: claim it and stop scanning. */
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	/* bp == NULL here iff the scan found no usable buffer. */
	if (bp == NULL) {
		/*
		 * getnewbuf() may sleep and fail; on failure rescan the
		 * hash chain, since the block may have appeared meanwhile.
		 */
		if (nbp == NULL && getnewbuf(slpflag, slptimeo, &nbp) != 0) {
			goto start;
		}
		bp = nbp;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	} else if (nbp != NULL) {
		/*
		 * Set B_AGE so that buffer appear at BQ_CLEAN head
		 * and gets reused ASAP.
		 */
		SET(nbp->b_flags, B_AGE);
		brelse(nbp);
	}
	allocbuf(bp, size);
	return (bp);
}