/*
 * breada - read a block with one block of read-ahead.
 *
 * Returns the (up-to-date) buffer for 'block', or NULL on I/O error or
 * when 'pos' is already at/past 'filesize'.  The caller must brelse()
 * the returned buffer.  'bufsize' is accepted for interface
 * compatibility but unused here.
 *
 * Bug fix: the original never issued the READ request for 'bh' itself,
 * only for the read-ahead buffer.  If 'bh' was neither up to date nor
 * already locked by in-flight I/O, wait_on_buffer() returned at once
 * and the function reported failure (NULL) for a perfectly readable
 * block.  The read is now queued before waiting.
 */
struct buffer_head *breada(kdev_t dev, block_t block, int bufsize,
                           unsigned int pos, unsigned int filesize)
{
    register struct buffer_head *bh, *bha;

    if (pos >= filesize)
        return NULL;
    if (block < 0)
        return NULL;

    bh = getblk(dev, block);
    if (buffer_uptodate(bh))
        return bh;

    /* Queue the read of the wanted block (this was missing). */
    ll_rw_blk(READ, bh);

    /* Start read-ahead on the next block; nobody waits for it. */
    bha = getblk(dev, block + 1);
    if (buffer_uptodate(bha)) {
        brelse(bha);
        bha = NULL;
    } else {
        /* Request the read for this buffer, and then release it */
        ll_rw_blk(READ, bha);
        brelse(bha);
    }

    /* Wait for the wanted buffer, and then continue on */
    wait_on_buffer(bh);
    if (buffer_uptodate(bh))
        return bh;
    brelse(bh);
    return NULL;
}
/*
 * sync_buffers - write dirty buffers back to disk.
 *
 * Walks the buffer chain and queues a WRITE for every dirty buffer
 * belonging to 'dev' (or to any device when dev == 0).  When 'wait' is
 * nonzero, locked dirty buffers are waited for and then written;
 * otherwise they are skipped so the sync never blocks.
 *
 * Bug fix: the old test `if (buffer_locked(bh) && wait) continue;
 * else wait_on_buffer(bh);` was inverted with respect to its own
 * comment - it skipped locked buffers precisely when the caller asked
 * to wait, and blocked on them when the caller asked not to.
 */
static void sync_buffers(kdev_t dev, int wait)
{
    register struct buffer_head *bh;

    for (bh = bh_chain; bh != NULL; bh = bh->b_next) {
        if (dev && bh->b_dev != dev)
            continue;
        /*
         * Skip clean buffers - nothing to write back.
         */
        if (buffer_clean(bh))
            continue;
        /*
         * Locked buffers: skip unless wait was requested, in which
         * case wait for the I/O to finish and then write the buffer.
         */
        if (buffer_locked(bh)) {
            if (!wait)
                continue;
            wait_on_buffer(bh);
        }
        /*
         * Pin the buffer across the write request.
         */
        bh->b_count++;
        ll_rw_blk(WRITE, bh);
        bh->b_count--;
    }
}
/*
 * readbuf - make sure a buffer holds valid data, reading from disk if
 * necessary.
 *
 * Returns the buffer on success.  On read failure the buffer is
 * released and NULL is returned, so the caller must not touch 'bh'
 * after a NULL return.
 */
struct buffer_head *readbuf(register struct buffer_head *bh)
{
    /* Already valid - nothing to do. */
    if (buffer_uptodate(bh))
        return bh;

    /* Queue the read and block until it completes. */
    ll_rw_blk(READ, bh);
    wait_on_buffer(bh);

    if (buffer_uptodate(bh))
        return bh;

    /* I/O failed: drop our reference and report the error. */
    brelse(bh);
    return NULL;
}
/*
 * swap_out - write the memory segment at 'base' out to the swap device.
 *
 * Allocates a hole in the swap map, retargets every task whose code or
 * data segment lives at 'base' to the swap location, writes the segment
 * out block by block through the shared 'swap_buf' buffer head, and
 * finally frees the memory hole.
 *
 * Returns 1 on success, -1 when no swap space is available.
 *
 * NOTE(review): not reentrant - uses the single global 'swap_buf';
 * presumably callers serialize swapping.  Confirm against callers.
 */
static int swap_out(seg_t base)
{
    register struct task_struct *t;
    register struct malloc_hole *o = find_hole(&memmap, base);
    struct malloc_hole *so;
    int ct, blocks;

    /* We can hit disk this time. Allocate a hole in 1K increments */
    /* assumes o->extent is in 16-byte paragraphs (64 per 1K block),
     * rounded up - TODO confirm unit */
    blocks = (o->extent + 0x3F) >> 6;
    so = best_fit_hole(&swapmap, blocks);
    if (so == NULL) {
        /* No free swap */
        return -1;
    }
    split_hole(&swapmap, so, blocks);
    so->flags = HOLE_USED;
    so->refcount = o->refcount;

    /* Point every task using this segment at its swap location and
     * mark it swapped.  'c' snapshots the flags before modification so
     * both tests see the pre-swap state. */
    for_each_task(t) {
        int c = t->mm.flags;
        if (t->mm.cseg == base && !(c & CS_SWAP)) {
            t->mm.cseg = so->page_base;
            t->mm.flags |= CS_SWAP;
            debug2("MALLOC: swaping out code of pid %d blocks %d\n",
                   t->pid, blocks);
        }
        if (t->mm.dseg == base && !(c & DS_SWAP)) {
            t->mm.dseg = so->page_base;
            t->mm.flags |= DS_SWAP;
            debug2("MALLOC: swaping out data of pid %d blocks %d\n",
                   t->pid, blocks);
        }
    }

    /* Now write the segment out */
    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 1;           /* force the write */
        swap_buf.b_seg = o->page_base;  /* source segment in memory */
        swap_buf.b_data = ct << 10;     /* byte offset: block * 1K */
        ll_rw_blk(WRITE, &swap_buf);
        wait_on_buffer(&swap_buf);      /* synchronous, one block at a time */
    }

    /* Segment is safely on swap: release the memory hole and coalesce. */
    o->flags = HOLE_FREE;
    sweep_holes(&memmap);
    return 1;
}
/*
 * blk_rw - common read/write engine for raw block device files.
 *
 * Transfers up to 'count' bytes between user space 'buf' and the
 * device backing 'inode', starting at filp->f_pos; 'wr' selects
 * BLOCK_READ or BLOCK_WRITE.  Advances filp->f_pos as it goes.
 *
 * Returns the number of bytes transferred, or -EIO when an I/O error
 * occurs before anything was transferred (a partial transfer returns
 * the partial count).
 */
static int blk_rw(struct inode *inode, register struct file *filp,
                  char *buf, size_t count, int wr)
{
    register struct buffer_head *bh;
    size_t in_block, span;
    int total = 0;

    while (count > 0) {
        /* Split the file position into offset-in-block and span. */
        in_block = ((size_t) filp->f_pos) & (BLOCK_SIZE - 1);
        span = BLOCK_SIZE - in_block;
        if (span > count)
            span = count;

        /*
         * Fetch the block.  A whole-block write does not need the old
         * contents, so readbuf() runs only for reads and for partial
         * writes.
         */
        bh = getblk(inode->i_rdev,
                    (block_t) (filp->f_pos >> BLOCK_SIZE_BITS));
        if ((wr == BLOCK_READ) || (span != BLOCK_SIZE)) {
            if (!readbuf(bh)) {
                if (!total)
                    total = -EIO;
                break;
            }
        }
        map_buffer(bh);

        if (wr == BLOCK_WRITE) {
            /* Copy user data in, mark the buffer dirty, and push the
             * physical write synchronously. */
            memcpy_fromfs(bh->b_data + in_block, buf, span);
            bh->b_uptodate = bh->b_dirty = 1;
            ll_rw_blk(WRITE, bh);
            wait_on_buffer(bh);
            if (!bh->b_uptodate) {
                /* Write error. */
                unmap_brelse(bh);
                if (!total)
                    total = -EIO;
                break;
            }
        } else {
            /* Copy block data out to user space; buffer unchanged. */
            memcpy_tofs(buf, bh->b_data + in_block, span);
        }

        /* Release the buffer and advance all cursors. */
        unmap_brelse(bh);
        buf += span;
        filp->f_pos += span;
        total += span;
        count -= span;
    }
    return total;
}
/*
 * swap_in - bring the segment swapped out at swap location 'base' back
 * into main memory.
 *
 * Allocates a memory hole, reads the segment back block by block
 * through the shared 'swap_buf', retargets every task referencing the
 * swapped segment, and (currently unconditionally, see #if 0 below)
 * frees the swap hole.
 *
 * 'chint' is a cache hint; it is currently unused because the
 * swap-cache path is compiled out.
 *
 * Returns 0 on success, -1 when no memory hole is available.
 */
static int swap_in(seg_t base, int chint)
{
    register struct malloc_hole *o;
    struct malloc_hole *so;
    int ct, blocks;
    register struct task_struct *t;

    so = find_hole(&swapmap, base);
    /* Find memory for this segment */
    /* assumes so->extent is in 1K blocks, so << 6 converts to 16-byte
     * paragraphs - TODO confirm unit */
    o = best_fit_hole(&memmap, so->extent << 6);
    if (o == NULL)
        return -1;

    /* Now read the segment in */
    split_hole(&memmap, o, so->extent << 6);
    o->flags = HOLE_USED;
    o->refcount = so->refcount;

    blocks = so->extent;

    /* Synchronous one-block-at-a-time read via the global swap_buf. */
    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 0;
        swap_buf.b_uptodate = 0;        /* force a real device read */
        swap_buf.b_seg = o->page_base;  /* destination segment in memory */
        swap_buf.b_data = ct << 10;     /* byte offset: block * 1K */
        ll_rw_blk(READ, &swap_buf);
        wait_on_buffer(&swap_buf);
    }

    /*
     *      Update the memory management tables
     */
    for_each_task(t) {
        int c = t->mm.flags;    /* snapshot of flags before modification */
        if (t->mm.cseg == base && c & CS_SWAP) {
            debug2("MALLOC: swapping in code of pid %d seg %x\n",
                   t->pid, t->mm.cseg);
            t->mm.cseg = o->page_base;
            t->mm.flags &= ~CS_SWAP;
        }
        if (t->mm.dseg == base && c & DS_SWAP) {
            debug2("MALLOC: swapping in data of pid %d seg %x\n",
                   t->pid, t->mm.dseg);
            t->mm.dseg = o->page_base;
            t->mm.flags &= ~DS_SWAP;
        }
        /* Task was (partly) swapped and is now fully resident:
         * presumably reload its segment registers so it can run -
         * confirm put_ustack semantics against the task switch code. */
        if (c && !t->mm.flags) {
            t->t_regs.cs = t->mm.cseg;
            t->t_regs.ds = t->mm.dseg;
            t->t_regs.ss = t->mm.dseg;
            put_ustack(t, 2, t->t_regs.cs);
        }
    }

    /* Our equivalent of the Linux swap cache. Try and avoid writing CS
     * back. Need to kill segments on last exit for this to work, and
     * keep a table - TODO */
#if 0
    if (chint==0)
#endif
    {
        /* Swap-cache path disabled: always free the swap hole. */
        so->refcount = 0;
        so->flags = HOLE_FREE;
        sweep_holes(&swapmap);
    }
    return 0;
}