seg_t mm_dup(seg_t base)
{
    register struct malloc_hole *o, *m;
    size_t i;

    debug("MALLOC: mm_dup()\n");
    o = find_hole(&memmap, base);
    if (o->flags != HOLE_USED)
        panic("bad/swapped hole");

#ifdef CONFIG_SWAP
    while ((m = best_fit_hole(&memmap, o->extent)) == NULL) {
        seg_t s = swap_strategy(NULL);
        if (!s || swap_out(s) == -1)
            return NULL;
    }
#else
    m = best_fit_hole(&memmap, o->extent);
    if (m == NULL)
        return NULL;
#endif

    split_hole(&memmap, m, o->extent);
    m->flags = HOLE_USED;
    m->refcount = 1;
    i = (o->extent << 4);       /* extent is in 16-byte paragraphs; convert to bytes */
    fmemcpy(m->page_base, 0, o->page_base, 0, (__u16) i);
    return m->page_base;
}
seg_t mm_alloc(segext_t pages)
{
    /*
     *  Which hole fits best ?
     */
    register struct malloc_hole *m;

#ifdef CONFIG_SWAP
    while ((m = best_fit_hole(&memmap, pages)) == NULL) {
        seg_t s = swap_strategy(NULL);
        if (s == NULL || swap_out(s) == -1)
            return NULL;
    }
#else
    m = best_fit_hole(&memmap, pages);
    if (m == NULL)
        return NULL;
#endif

    /*
     *  The hole is (probably) too big
     */
    split_hole(&memmap, m, pages);
    m->flags = HOLE_USED;
    m->refcount = 1;
    return m->page_base;
}
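/*
 * Illustrative sketch only (hypothetical names, not the definitions used
 * above): mm_alloc() and mm_dup() rely on best_fit_hole()/split_hole(),
 * which live elsewhere in this file.  Assuming each hole records its base
 * and extent in 16-byte paragraphs plus a flags byte, a refcount, and a
 * next pointer in memory order, the best-fit policy amounts to keeping the
 * smallest free hole that is still large enough.  The real declarations
 * may differ; this only shows the policy the allocator depends on.
 */
struct example_hole {                   /* hypothetical layout, for illustration */
    seg_t page_base;                    /* base address, in paragraphs */
    segext_t extent;                    /* size, in paragraphs */
    struct example_hole *next;          /* next hole in memory order */
    unsigned char refcount;
    unsigned char flags;                /* HOLE_FREE, HOLE_USED, ... */
};

static struct example_hole *example_best_fit(struct example_hole *list,
                                             segext_t size)
{
    register struct example_hole *m;
    struct example_hole *best = NULL;

    for (m = list; m != NULL; m = m->next)
        if (m->flags == HOLE_FREE && m->extent >= size
            && (best == NULL || m->extent < best->extent))
            best = m;
    return best;                        /* NULL: nothing large enough is free */
}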
static int swap_out(seg_t base)
{
    register struct task_struct *t;
    register struct malloc_hole *o = find_hole(&memmap, base);
    struct malloc_hole *so;
    int ct, blocks;

    /* We can hit disk this time. Allocate a hole in 1K increments */
    blocks = (o->extent + 0x3F) >> 6;
    so = best_fit_hole(&swapmap, blocks);
    if (so == NULL) {
        /* No free swap */
        return -1;
    }
    split_hole(&swapmap, so, blocks);
    so->flags = HOLE_USED;
    so->refcount = o->refcount;

    for_each_task(t) {
        int c = t->mm.flags;

        if (t->mm.cseg == base && !(c & CS_SWAP)) {
            t->mm.cseg = so->page_base;
            t->mm.flags |= CS_SWAP;
            debug2("MALLOC: swapping out code of pid %d blocks %d\n",
                   t->pid, blocks);
        }
        if (t->mm.dseg == base && !(c & DS_SWAP)) {
            t->mm.dseg = so->page_base;
            t->mm.flags |= DS_SWAP;
            debug2("MALLOC: swapping out data of pid %d blocks %d\n",
                   t->pid, blocks);
        }
    }

    /* Now write the segment out */
    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 1;
        swap_buf.b_seg = o->page_base;
        swap_buf.b_data = ct << 10;
        ll_rw_blk(WRITE, &swap_buf);
        wait_on_buffer(&swap_buf);
    }
    o->flags = HOLE_FREE;
    sweep_holes(&memmap);
    return 1;
}
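/*
 * A minimal sketch of the unit conversion swap_out() depends on
 * (hypothetical helper, not called by the code above): memory extents are
 * counted in 16-byte paragraphs while swap space is allocated in 1K
 * blocks, so 64 paragraphs make one block and partial blocks round up.
 * For example, an extent of 0x81 paragraphs (2064 bytes) needs
 * (0x81 + 0x3F) >> 6 = 3 blocks.
 */
static int example_paragraphs_to_blocks(segext_t extent)
{
    return (extent + 0x3F) >> 6;        /* round up to whole 1K blocks */
}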
static struct hole *mm_alloc(uint16_t pages)
{
    /*
     *  Which hole fits best ?
     */
    struct hole *m;

    m = best_fit_hole(&memmap, pages);
    if (m == NULL)
        return NULL;

    /*
     *  The hole is (probably) too big
     */
    if (split_hole(&memmap, m, pages))
        return NULL;
    m->flags = HOLE_USED;
    m->refcount = 1;
    return m;
}
static int swap_in(seg_t base, int chint)
{
    register struct malloc_hole *o;
    struct malloc_hole *so;
    int ct, blocks;
    register struct task_struct *t;

    so = find_hole(&swapmap, base);
    /* Find memory for this segment */
    o = best_fit_hole(&memmap, so->extent << 6);
    if (o == NULL)
        return -1;

    /* Now read the segment in */
    split_hole(&memmap, o, so->extent << 6);
    o->flags = HOLE_USED;
    o->refcount = so->refcount;
    blocks = so->extent;

    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 0;
        swap_buf.b_uptodate = 0;
        swap_buf.b_seg = o->page_base;
        swap_buf.b_data = ct << 10;
        ll_rw_blk(READ, &swap_buf);
        wait_on_buffer(&swap_buf);
    }

    /*
     *  Update the memory management tables
     */
    for_each_task(t) {
        int c = t->mm.flags;

        if (t->mm.cseg == base && (c & CS_SWAP)) {
            debug2("MALLOC: swapping in code of pid %d seg %x\n",
                   t->pid, t->mm.cseg);
            t->mm.cseg = o->page_base;
            t->mm.flags &= ~CS_SWAP;
        }
        if (t->mm.dseg == base && (c & DS_SWAP)) {
            debug2("MALLOC: swapping in data of pid %d seg %x\n",
                   t->pid, t->mm.dseg);
            t->mm.dseg = o->page_base;
            t->mm.flags &= ~DS_SWAP;
        }
        if (c && !t->mm.flags) {
            t->t_regs.cs = t->mm.cseg;
            t->t_regs.ds = t->mm.dseg;
            t->t_regs.ss = t->mm.dseg;
            put_ustack(t, 2, t->t_regs.cs);
        }
    }

    /* Our equivalent of the Linux swap cache. Try and avoid writing CS
     * back. Need to kill segments on last exit for this to work, and
     * keep a table - TODO */
#if 0
    if (chint == 0)
#endif
    {
        so->refcount = 0;
        so->flags = HOLE_FREE;
        sweep_holes(&swapmap);
    }
    return 0;
}
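/*
 * Both swap paths above finish by marking a hole HOLE_FREE and calling
 * sweep_holes() on the affected map.  A minimal sketch of such a sweep,
 * reusing the hypothetical example_hole layout from the earlier sketch
 * (the real routine in this file may differ, and descriptor reclamation
 * is omitted here): adjacent free holes are merged so that later
 * best-fit searches see one large hole instead of several small ones.
 */
static void example_sweep(struct example_hole *list)
{
    register struct example_hole *m = list;

    while (m != NULL && m->next != NULL) {
        if (m->flags == HOLE_FREE && m->next->flags == HOLE_FREE
            && m->page_base + m->extent == m->next->page_base) {
            /* merge the neighbour into this hole and unlink it */
            m->extent += m->next->extent;
            m->next = m->next->next;
        } else
            m = m->next;
    }
}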