/*
 * Duplicate the in-core segment at 'base': allocate a new hole of the
 * same extent, copy the contents across, and return the new segment's
 * base address.
 *
 * Panics if 'base' does not refer to an in-use, non-swapped hole.
 * Returns NULL (0) if no hole is available (and, with CONFIG_SWAP,
 * nothing more could be swapped out to make room).
 */
seg_t mm_dup(seg_t base)
{
    register struct malloc_hole *o, *m;
    size_t i;

    debug("MALLOC: mm_dup()\n");
    o = find_hole(&memmap, base);
    if (o->flags != HOLE_USED)
        panic("bad/swapped hole");
#ifdef CONFIG_SWAP
    /* Keep swapping segments out until a big enough hole appears. */
    while ((m = best_fit_hole(&memmap, o->extent)) == NULL) {
        seg_t s = swap_strategy(NULL);
        if (!s || swap_out(s) == -1)
            return NULL;
    }
#else
    m = best_fit_hole(&memmap, o->extent);
    if (m == NULL)
        return NULL;
#endif
    split_hole(&memmap, m, o->extent);
    m->flags = HOLE_USED;
    m->refcount = 1;
    /* extent is in paragraphs; <<4 converts to a byte count for the copy */
    i = (o->extent << 4);
    fmemcpy(m->page_base, 0, o->page_base, 0, (__u16) i);
    return m->page_base;
}
/*
 * Allocate a segment of 'pages' paragraphs from the memory hole map.
 *
 * Returns the base of the newly allocated segment, or NULL (0) when no
 * hole fits (and, with CONFIG_SWAP, nothing more could be swapped out).
 */
seg_t mm_alloc(segext_t pages)
{
    /*
     * Which hole fits best ?
     */
    register struct malloc_hole *m;

#ifdef CONFIG_SWAP
    /* Keep swapping segments out until a big enough hole appears. */
    while ((m = best_fit_hole(&memmap, pages)) == NULL) {
        seg_t s = swap_strategy(NULL);
        /* seg_t is an integer type: test against 0 (as mm_dup does),
         * not the pointer constant NULL. */
        if (!s || swap_out(s) == -1)
            return NULL;
    }
#else
    m = best_fit_hole(&memmap, pages);
    if (m == NULL)
        return NULL;
#endif
    /*
     * The hole is (probably) too big
     */
    split_hole(&memmap, m, pages);
    m->flags = HOLE_USED;
    m->refcount = 1;
    return m->page_base;
}
/*
 * Write the in-core segment at 'base' out to the swap device and free
 * its memory hole.  Swap space is allocated in 1K blocks; every task
 * whose code or data segment lives at 'base' is retargeted to the swap
 * address and flagged as swapped before the data is written.
 *
 * Returns 1 on success, -1 if no swap space is free.
 */
static int swap_out(seg_t base)
{
    register struct task_struct *t;
    register struct malloc_hole *o = find_hole(&memmap, base);
    struct malloc_hole *so;
    int ct, blocks;

    /* We can hit disk this time. Allocate a hole in 1K increments */
    blocks = (o->extent + 0x3F) >> 6;   /* paragraphs -> 1K blocks, rounded up */
    so = best_fit_hole(&swapmap, blocks);
    if (so == NULL) {
        /* No free swap */
        return -1;
    }
    split_hole(&swapmap, so, blocks);
    so->flags = HOLE_USED;
    so->refcount = o->refcount;

    /* Retarget every task that uses this segment to its swap location. */
    for_each_task(t) {
        int c = t->mm.flags;
        if (t->mm.cseg == base && !(c & CS_SWAP)) {
            t->mm.cseg = so->page_base;
            t->mm.flags |= CS_SWAP;
            debug2("MALLOC: swaping out code of pid %d blocks %d\n",
                   t->pid, blocks);
        }
        if (t->mm.dseg == base && !(c & DS_SWAP)) {
            t->mm.dseg = so->page_base;
            t->mm.flags |= DS_SWAP;
            debug2("MALLOC: swaping out data of pid %d blocks %d\n",
                   t->pid, blocks);
        }
    }

    /* Now write the segment out */
    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 1;
        swap_buf.b_seg = o->page_base;
        swap_buf.b_data = ct << 10;     /* byte offset of this 1K block */
        ll_rw_blk(WRITE, &swap_buf);
        wait_on_buffer(&swap_buf);      /* synchronous: wait for each block */
    }
    o->flags = HOLE_FREE;
    sweep_holes(&memmap);
    return 1;
}
/*
 * Grab the best-fitting free hole for a 'pages'-sized allocation.
 *
 * Returns the hole marked in use with a single reference, or NULL when
 * no hole fits or the oversized hole cannot be split down to size.
 */
static struct hole *mm_alloc(uint16_t pages)
{
    struct hole *found = best_fit_hole(&memmap, pages);

    if (!found)
        return NULL;

    /* The chosen hole may be larger than requested: trim it. */
    if (split_hole(&memmap, found, pages) != 0)
        return NULL;

    found->flags = HOLE_USED;
    found->refcount = 1;
    return found;
}
void read_and_delta (file_reader & reader , xdelta_stream & stream , const hash_table & hashes , std::set<hole_t> & hole_set , const int blk_len , bool need_split_hole) { bool adddiff = !need_split_hole; char_buffer<uchar_t> buf (XDELTA_BUFFER_LEN); typedef std::set<hole_t>::iterator it_t; std::list<hole_t> holes2remove; for (it_t begin = hole_set.begin (); begin != hole_set.end (); ++begin) { const hole_t & hole = *begin; uint64_t offset = reader.seek_file (hole.offset, FILE_BEGIN); if (offset != hole.offset) { std::string errmsg = fmt_string ("Can't seek file %s(%s)." , reader.get_fname ().c_str (), error_msg ().c_str ()); THROW_XDELTA_EXCEPTION (errmsg); } uint32_t buflen = XDELTA_BUFFER_LEN; uint32_t to_read_bytes = (uint32_t)hole.length; buflen = to_read_bytes > buflen ? buflen : to_read_bytes; uchar_t * rdbuf = buf.begin (); uint32_t size = reader.read_file (rdbuf, buflen); if (size != buflen) { std::string errmsg = fmt_string ("Can't read file %s(%s)." , reader.get_fname ().c_str (), error_msg ().c_str ()); THROW_XDELTA_EXCEPTION (errmsg); } to_read_bytes -= size; const uchar_t * endbuf = rdbuf + size; rdbuf = buf.begin (); if ((int32_t)(endbuf - rdbuf) >= blk_len) { uchar_t * sentrybuf = rdbuf; rolling_hasher hasher; hasher.eat_hash (rdbuf, blk_len); while (true) { bool newhash = false; const slow_hash * bsh = hashes.find_block (hasher.hash_value (), rdbuf, blk_len); uchar_t outchar = 0; if (bsh) { // a match was found. uint32_t slipsize = (uint32_t)(rdbuf - sentrybuf); if (slipsize > 0 && adddiff) stream.add_block (sentrybuf, slipsize, offset); offset += slipsize; stream.add_block (bsh->tpos, blk_len, offset); if (need_split_hole) { hole_t newhole; newhole.offset = offset; newhole.length = blk_len; holes2remove.push_back (newhole); } rdbuf += blk_len; sentrybuf = rdbuf; newhash = true; offset += blk_len; } else { // slip the window by one bytes which size is blk_len. outchar = *rdbuf; ++rdbuf; } // // beyond the buffer. 
int remain = (int)(endbuf - rdbuf); if (remain < blk_len) { if (to_read_bytes == 0) { // no more to read. uint32_t slipsize = (uint32_t)(endbuf - sentrybuf); if (slipsize > 0 && adddiff) stream.add_block (sentrybuf, slipsize, offset); goto end; } else { memmove (buf.begin (), rdbuf, remain); rdbuf = buf.begin (); sentrybuf = rdbuf; buflen = XDELTA_BUFFER_LEN - remain; buflen = to_read_bytes > buflen ? buflen : to_read_bytes; size = reader.read_file (rdbuf + remain, buflen); if (size != buflen) { std::string errmsg = fmt_string ("Can't read file %s(%s)." , reader.get_fname ().c_str (), error_msg ().c_str ()); THROW_XDELTA_EXCEPTION (errmsg); } to_read_bytes -= size; endbuf = rdbuf + remain + size; remain += size; if (remain >= blk_len) { if (newhash) hasher.eat_hash (rdbuf, blk_len); else hasher.update (outchar, *(rdbuf + blk_len)); } else { // // one read must complement data which length plus // remain must be more than one block length of @f_blk_len, // so if remain less than that, it must be reach the end of // file // if (adddiff) stream.add_block (rdbuf, remain, offset); offset += remain; goto end; } } } else { if (newhash) hasher.eat_hash (rdbuf, blk_len); else hasher.update (outchar, *(rdbuf + blk_len - 1)); } } } else { if (adddiff) stream.add_block (rdbuf, size, offset); } end: continue; } if (need_split_hole) { typedef std::list<hole_t>::iterator it_t; for (it_t begin = holes2remove.begin (); begin != holes2remove.end (); ++begin) split_hole (hole_set, *begin); } return; }
/*
 * Read the swapped-out segment at 'base' (a swap-map address) back into
 * memory, retarget every task that referenced it, and free the swap
 * hole.  'chint' is a cache hint that is currently unused (the check is
 * disabled by the #if 0 below, so the swap hole is always freed).
 *
 * Returns 0 on success, -1 if no memory hole is large enough.
 */
static int swap_in(seg_t base, int chint)
{
    register struct malloc_hole *o;
    struct malloc_hole *so;
    int ct, blocks;
    register struct task_struct *t;

    so = find_hole(&swapmap, base);
    /* Find memory for this segment */
    o = best_fit_hole(&memmap, so->extent << 6);   /* 1K blocks -> paragraphs */
    if (o == NULL)
        return -1;
    /* Now read the segment in */
    split_hole(&memmap, o, so->extent << 6);
    o->flags = HOLE_USED;
    o->refcount = so->refcount;

    blocks = so->extent;
    for (ct = 0; ct < blocks; ct++) {
        swap_buf.b_blocknr = so->page_base + ct;
        swap_buf.b_dev = swap_dev;
        swap_buf.b_lock = 0;
        swap_buf.b_dirty = 0;
        swap_buf.b_uptodate = 0;
        swap_buf.b_seg = o->page_base;
        swap_buf.b_data = ct << 10;     /* byte offset of this 1K block */
        ll_rw_blk(READ, &swap_buf);
        wait_on_buffer(&swap_buf);      /* synchronous: wait for each block */
    }

    /*
     * Update the memory management tables
     */
    for_each_task(t) {
        int c = t->mm.flags;
        if (t->mm.cseg == base && c & CS_SWAP) {
            debug2("MALLOC: swapping in code of pid %d seg %x\n",
                   t->pid, t->mm.cseg);
            t->mm.cseg = o->page_base;
            t->mm.flags &= ~CS_SWAP;
        }
        if (t->mm.dseg == base && c & DS_SWAP) {
            debug2("MALLOC: swapping in data of pid %d seg %x\n",
                   t->pid, t->mm.dseg);
            t->mm.dseg = o->page_base;
            t->mm.flags &= ~DS_SWAP;
        }
        /* Task was swapped and is now fully resident again: refresh the
         * saved segment registers and the on-stack cs value. */
        if (c && !t->mm.flags) {
            t->t_regs.cs = t->mm.cseg;
            t->t_regs.ds = t->mm.dseg;
            t->t_regs.ss = t->mm.dseg;
            put_ustack(t, 2, t->t_regs.cs);
        }
    }

    /* Our equivalent of the Linux swap cache. Try and avoid writing CS
     * back. Need to kill segments on last exit for this to work, and
     * keep a table - TODO */
#if 0
    if (chint==0)
#endif
    {
        so->refcount = 0;
        so->flags = HOLE_FREE;
        sweep_holes(&swapmap);
    }
    return 0;
}