shpid_t lg_tag_indirect_h::_pid(uint4_t pid_num) const
{
    FUNC(lg_tag_indirect_h::_pid);
    // Translate logical page number 'pid_num' within this large record
    // into the physical short page id, by walking the record's 1- or
    // 2-level indirect tree.  Returns 0 if an indirect page cannot be
    // fixed (errors are deliberately swallowed; see PAGEFIXBUG marks).

    // get the root page
    lpid_t root_pid(stid(), _iref.indirect_root);
    lgindex_p root;
    W_IGNORE( root.fix(root_pid, LATCH_SH) ); // PAGEFIXBUG
    if (!root.is_fixed())
        return 0; // fix failed; caller treats 0 as "no pid"

    // if the tree is only 1 level, return the correct pid easily
    if (indirect_type(_page_cnt) == t_large_1) {
        return root.pids(pid_num);
    }

    w_assert9(indirect_type(_page_cnt) == t_large_2);

    // find "slot" containing pointer to page with pid_num
    w_assert9( (pid_num/lgindex_p::max_pids) < max_uint2);
    slotid_t idx = (slotid_t)(pid_num/lgindex_p::max_pids);

    // fix the second-level indirect page and index into it
    lpid_t indirect_pid(stid(), root.pids(idx));
    lgindex_p indirect;
    W_IGNORE( indirect.fix(indirect_pid, LATCH_SH) ); // PAGEFIXBUG
    if (!indirect.is_fixed())
        return 0;
    return indirect.pids(pid_num % lgindex_p::max_pids);
}
rc_t lgdata_p::format(const lpid_t& pid, tag_t tag, uint4_t flags,
        store_flag_t store_flags
        )
{
    // Format a large-record data page: low-level page format plus one
    // zero-length slot, then log both as a single combined record.
    w_assert9(tag == t_lgdata_p);

    vec_t vec; // empty vector

    // format, then create a 0-length slot

    /* Do the formatting and insert w/o logging them */
    W_DO( page_p::_format(pid, tag, flags, store_flags) );

    // always set the store_flag here --see comments
    // in bf::fix(), which sets the store flags to st_regular
    // for all pages, and lets the type-specific store manager
    // override (because only file pages can be insert_file)
    // persistent_part().set_page_storeflags ( store_flags );
    this->set_store_flags(store_flags); // through the page_p, through the bfcb_t

    W_COERCE( page_p::insert_expand(0, 1, &vec, false/*logit*/) );

    /* Now, log as one (combined) record: */
    rc_t rc = log_page_format(*this, 0, 1, &vec); // lgdata_p
    return rc;
}
rc_t lg_tag_chunks_h::update(uint4_t start_byte, const vec_t& data) const
{
    FUNC(lg_tag_chunks_h::update);
    // Overwrite bytes [start_byte, start_byte+data.size()) of the large
    // record, one lgdata_p page at a time.
    const uint4_t total = data.size();                    // bytes to write
    uint4_t page_idx = start_byte / lgdata_p::data_sz;    // page holding first byte
    uint4_t page_off = start_byte % lgdata_p::data_sz;    // offset within that page
    uint4_t written = 0;                                  // bytes written so far
    lpid_t curr_pid(_page.pid().vol(), _cref.store, 0);

    while (written < total) {
        // how much of the remaining data fits on the current page
        uint4_t chunk = MIN(lgdata_p::data_sz - page_off, total - written);
        curr_pid.page = _pid(page_idx);
        lgdata_p lgdata;
        W_DO( lgdata.fix(curr_pid, LATCH_EX) );
        W_DO( lgdata.update(page_off, data, written, chunk) );
        page_off = 0;   // every page after the first starts at offset 0
        written += chunk;
        ++page_idx;
    }

    // verify last page touched is same as calculated last page
    w_assert9(page_idx - 1 == (start_byte + data.size() - 1) / lgdata_p::data_sz);
    return RCOK;
}
/*********************************************************************
 *
 *  logrec_t::fill(pid, len)
 *
 *  Fill the "pid" and "length" field of the log record.
 *
 *********************************************************************/
void logrec_t::fill(PageID p, StoreID store, uint16_t tag, smsize_t l)
{
    w_assert9(w_base_t::is_aligned(_data));

    /* adjust _cat: mark records generated while the xct is rolling back */
    xct_t *x = xct();
    if(x && (x->rolling_back() || x->state() == smlevel_0::xct_aborting))
    {
        header._cat |= t_rollback;
    }
    set_pid(0);
    if (!is_single_sys_xct()) { // prv does not exist in single-log system transaction
        set_xid_prev(lsn_t::null);
    }
    header._page_tag = tag;
    header._pid = p;
    header._stid = store;
    // SSX records carry a smaller header, so the payload starts elsewhere
    char *dat = is_single_sys_xct() ? data_ssx() : data();
    if (l != ALIGN_BYTE(l)) {
        // zero out extra space to keep purify happy
        memset(dat+l, 0, ALIGN_BYTE(l)-l);
    }
    // total record length = aligned payload + header + trailing lsn
    unsigned int tmp = ALIGN_BYTE(l)
        + (is_single_sys_xct() ? hdr_single_sys_xct_sz : hdr_non_ssx_sz)
        + sizeof(lsn_t);
    tmp = (tmp + 7) & unsigned(-8); // force 8-byte alignment
    w_assert1(tmp <= sizeof(*this));
    header._len = tmp;
    if(type() != t_skip) {
        DBG( << "Creat log rec: " << *this
            << " size: " << header._len
            << " xid_prevlsn: " << (is_single_sys_xct() ? lsn_t::null : xid_prev()) );
    }
} // FIX: function's closing brace was missing (unbalanced braces in original)
w_base_t::int4_t w_bitmap_t::first_set(uint4_t start) const
{
    // Return the index of the first set bit at or after 'start',
    // or -1 if no bit is set in [start, sz).
    w_assert9(start < sz);
    // NOTE: 'register' removed — the keyword is ill-formed in C++17
    // and was only ever an optimizer hint.
    uint1_t* p = ptr + div8(start);   // byte containing bit 'start'
    uint4_t mask = 1 << mod8(start);  // single-bit mask within that byte
    uint4_t size = sz;
    for (size -= start; size; start++, size--)  {
        if (*p & mask)  {
            w_assert9(is_set(start));
            return start;
        }
        if ((mask <<= 1) == 0x100)  { // shifted past bit 7: move to next byte
            mask = 1;
            p++;
        }
    }
    return -1; // no set bit found
}
rc_t lg_tag_indirect_h::append(uint4_t num_pages, const lpid_t new_pages[])
{
    FUNC(lg_tag_indirect_h::append);
    // Append num_pages data-page ids to the record's indirect tree:
    // fill the last existing indirect page first, then spill the
    // remainder onto one newly allocated indirect page.
    const uint max_pages = 64;
    shpid_t page_list[max_pages];
    w_assert9(num_pages <= max_pages);
    // copy just the short page ids out of the full lpid_t's
    for (uint i=0; i<num_pages; i++) page_list[i]=new_pages[i].page;

    if (_iref.indirect_root == 0) {
        // allocate a root indirect page, near last page in store
        lpid_t root_pid;
        W_DO(smlevel_0::io->alloc_a_page(stid(),
            lpid_t::eof,  // near hint
            root_pid,     // npages, array for output pids
            false,        // not may_realloc
            EX,           // lock on the allocated pages
            false         // do not search file for free pages
            ));
        _iref.indirect_root = root_pid.page;
        lgindex_p root;
        W_DO( root.fix(root_pid, LATCH_EX, root.t_virgin) ); // perform fake read of the new page
    }

    // calculate the number of pages to append to last index page
    uint space_on_last = lgindex_p::max_pids- _pages_on_last_indirect();
    uint4_t pages_on_last = MIN(num_pages, space_on_last);

    // number of pages to place on a new indirect_page
    uint4_t pages_on_new = num_pages - pages_on_last;

    // append pages to the last existing indirect page
    lpid_t last_index_pid(stid(), _last_indirect());
    lgindex_p last_index;
    W_DO( last_index.fix(last_index_pid, LATCH_EX) );
    w_assert1(last_index.is_fixed());
    W_DO(last_index.append(pages_on_last, page_list));

    if (pages_on_new) {
        // overflow: allocate a fresh indirect page and put the rest there
        lpid_t new_pid;
        W_DO(_add_new_indirect(new_pid));
        lgindex_p last_index2;
        W_DO( last_index2.fix(new_pid, LATCH_EX) );
        w_assert1(last_index2.is_fixed());
        W_DO(last_index2.append(pages_on_new, page_list+pages_on_last));
    }
    return RCOK;
}
rc_t lgdata_p::append(const vec_t& data, uint4_t start, uint4_t amount)
{
    FUNC(lgdata_p::append);
    // Grow slot 0 by 'amount' bytes taken from 'data' starting at 'start';
    // a zero-vector source appends 'amount' zero bytes instead.
    const slot_length_t old_len = (slot_length_t) tuple_size(0);
    if (data.is_zvec()) {
        const zvec_t zeros(amount);
        W_DO(splice(0, old_len, 0, zeros));
    } else {
        // carve the requested sub-vector out of 'data' and append it
        vec_t chunk(data, u4i(start), u4i(amount));
        w_assert9(amount == chunk.size());
        W_DO(splice(0, old_len, 0, chunk));
    }
    return RCOK;
}
shpid_t lg_tag_indirect_h::_last_indirect() const
{
    FUNC(lg_tag_indirect_h::_last_indirect);
    // Return the short pid of the last indirect page of the record.
    // In a one-level tree the root itself is that page.
    if (indirect_type(_page_cnt) == t_large_1)
        return _iref.indirect_root;

    // Two-level tree: read the root page and pick the last used entry.
    lpid_t root_pid(stid(), _iref.indirect_root);
    lgindex_p root;
    W_IGNORE( root.fix(root_pid, LATCH_SH) ); // PAGEFIXBUG
    if (!root.is_fixed())
        return 0; // fix failed; 0 signals error to the caller

    shpid_t* entries = (shpid_t*) root.tuple_addr(0);
    w_assert9(entries);
    return entries[(_page_cnt - 1) / lgindex_p::max_pids];
}
rc_t lgdata_p::update(uint4_t offset, const vec_t& data, uint4_t start,
        uint4_t amount)
{
    FUNC(lgdata_p::update);
    // Overwrite 'amount' bytes of slot 0 at 'offset' with bytes from
    // 'data' beginning at 'start'; a zero-vector source writes zeros.
    if (data.is_zvec()) {
        const zvec_t zeros(amount);
        W_DO(splice(0, u4i(offset), u4i(amount), zeros));
    } else {
        // sub-vector of 'data' with exactly the bytes to be written
        vec_t chunk(data, u4i(start), u4i(amount));
        w_assert9(amount == chunk.size());
        W_DO(splice(0, u4i(offset), u4i(amount), chunk));
    }
    return RCOK;
}
rc_t lg_tag_indirect_h::_add_new_indirect(lpid_t& new_pid)
{
    FUNC(lg_tag_indirect_h::_add_new_indirect);
    // Allocate a fresh indirect page and link it into the record's tree,
    // first converting a one-level tree into a two-level one if needed.
    // On success new_pid holds the id of the newly allocated page.

    // flags for new pages
    w_base_t::uint4_t flags = lgindex_p::t_virgin;

    if (indirect_type(_page_cnt) == t_large_1) {
        // must allocate a new root pid and point it to the current one
        lpid_t root_pid;
        W_DO(smlevel_0::io->alloc_a_page(stid(),
            lpid_t::eof,  // near hint
            root_pid,     // npages, array for output pids
            false,        // not may_realloc
            EX,           // lock on pages
            false         // do not search file for free pages
            ));
        lgindex_p root;
        W_DO( root.fix(root_pid, LATCH_EX, flags) );
        w_assert1(root.is_fixed());
        // old root becomes the new root's first (and only) child
        W_DO(root.append(1, &_iref.indirect_root));
        w_assert9(root_pid.stid() == stid());
        _iref.indirect_root = root_pid.page;
    }

    // allocate a new page and point to it from the root
    W_DO(smlevel_0::io->alloc_a_page(stid(),
        lpid_t::eof,  // near hint
        new_pid,      // npages, array of output pids
        false,        // not may_realloc
        EX,           // lock on new pages
        false         // do not search file for free pages
        ));
    lgindex_p new_page;
    W_DO( new_page.fix(new_pid, LATCH_EX, flags) ); // format page
    // re-fix the (possibly just-created) root and record the new child
    lpid_t root_pid(stid(), _iref.indirect_root);
    lgindex_p root;
    W_DO( root.fix(root_pid, LATCH_EX) );
    w_assert1(root.is_fixed());
    W_DO(root.append(1, &new_pid.page));
    return RCOK;
}
rc_t lgindex_p::format(const lpid_t& pid, tag_t tag, uint4_t flags,
        store_flag_t store_flags
        )
{
    // Format a large-record index page: low-level page format plus one
    // zero-length slot, logged as a single combined record.
    w_assert9(tag == t_lgindex_p);

    // create a 0-length slot
    vec_t vec; // empty vector

    /* Do the formatting and insert w/o logging them */
    W_DO( page_p::_format(pid, tag, flags, store_flags) );
    // NOTE(review): unlike lgdata_p::format, this does not call
    // set_store_flags(store_flags) after _format — confirm whether the
    // omission is intentional for index pages.
    W_COERCE(page_p::insert_expand(0, 1, &vec, false/*logit*/) );

    /* Now, log as one (combined) record: */
    rc_t rc = log_page_format(*this, 0, 1, &vec); // lgindex_p
    return rc;
}
void btree_insert_log::undo(PagePtr page)
{
    // Logical undo of a b-tree insertion: remove the key again.
    w_assert9(page == 0);
    btree_insert_t* dp = (btree_insert_t*) data();

    if (dp->sys_txn) {
        // The insertion log record was generated by a page rebalance
        // full logging operation; there is no 'undo' in this case.
        return;
    }

    // Rebuild the inserted key from the logged key bytes.
    w_keystr_t key;
    key.construct_from_keystr(dp->data, dp->klen);

    // TODO(Restart)...
    DBGOUT3( << "&&&& UNDO insertion, key: " << key);

    // ***LOGICAL*** don't grab locks during undo
    W_COERCE(smlevel_0::bt->remove_as_undo(header._stid, key));
}
ErrLog::ErrLog(
    const char *ident,       // required
    LoggingDestination dest, // required
    FILE *file,
    LogPriority level,       // = log_error
    char *ownbuf,            // = 0
    int ownbufsz             // = 0
) :
    _destination(dest),
    _level(level),
    _file(file),
    _ident(ident),
    // Use the caller-supplied buffer when one is given, else the
    // built-in 'buffer' member; same choice drives _bufsize and clog.
    _buffer(ownbuf?ownbuf:buffer),
    _bufsize(ownbuf?ownbufsz:sizeof(buffer)),
    // NOTE(review): members initialize in class-declaration order, not
    // list order — verify 'buffer' is declared before 'clog'.
    clog(ownbuf?ownbuf:buffer, ownbuf?ownbufsz:sizeof(buffer)),
    _magic(ERRORLOG__MAGIC)
{
    _init1();
    // this constructor variant only supports an already-open FILE*
    w_assert9( dest == log_to_open_file );
    _init2();
}
/*
 * truncate() removes pages at the end of large records
 * implemented as a set of chunks.
 */
rc_t lg_tag_chunks_h::truncate(uint4_t num_pages)
{
    FUNC(lg_tag_chunks_h::truncate);
    // Free the last num_pages data pages, then shrink/remove chunks
    // from the tail of the chunk list to match.
    smsize_t first_dealloc = page_count()-num_pages;
    smsize_t last_dealloc = page_count()-1;
#if W_DEBUG_LEVEL > 2
    uint4_t check_dealloc = 0;
#endif

    {   // without this bracketing,
        // VC++ thinks this smsize_t i is in the same
        // scope as the int i in the next for loop
        for (smsize_t i = first_dealloc; i <= last_dealloc; i++) {
            DBG(<<"freeing page " << pid(i));
            W_DO(smlevel_0::io->free_page(pid(i)));
#if W_DEBUG_LEVEL > 2
            check_dealloc++;
#endif
        }
    }
    w_assert3(check_dealloc == num_pages);

    // Walk the chunk list backwards: drop whole chunks while they fit
    // in num_pages, then trim the last partially-affected chunk.
    for (int i = _cref.chunk_cnt-1; i >= 0 && num_pages > 0; i--) {
        if (_cref.chunks[i].npages <= num_pages) {
            num_pages -= _cref.chunks[i].npages;
            _cref.chunk_cnt--; // this chunk is not needed
        } else {
            _cref.chunks[i].npages -= num_pages;
            num_pages -= num_pages; // i.e. num_pages = 0: done
        }
    }
    w_assert9(num_pages == 0);
    return RCOK;
}
rc_t lg_tag_indirect_h::update(uint4_t start_byte, const vec_t& data) const
{
    FUNC(lg_tag_indirect_h::update);
    // Overwrite record bytes [start_byte, start_byte+data.size()),
    // resolving each data page through the indirect tree.
    const uint4_t total = data.size();
    uint page_no = start_byte / lgdata_p::data_sz;     // first page touched
    uint4_t in_page = start_byte % lgdata_p::data_sz;  // offset within that page
    uint4_t done = 0;                                  // bytes updated so far
    lpid_t curr_pid(stid(), 0);

    while (done < total) {
        // amount of remaining data that fits on the current page
        uint4_t chunk = MIN(lgdata_p::data_sz - in_page, total - done);
        curr_pid.page = _pid(page_no);
        lgdata_p lgdata;
        W_DO( lgdata.fix(curr_pid, LATCH_EX) );
        W_DO( lgdata.update(in_page, data, done, chunk) );
        in_page = 0;   // later pages start at the beginning
        done += chunk;
        ++page_no;
    }

    // last page touched must match the computed final page
    w_assert9(page_no - 1 == (start_byte + data.size() - 1) / lgdata_p::data_sz);
    return RCOK;
}
int re_exec_posix(const char* string) { int status; if(!re_ready) { return -1; // no string compiled // possibly because of previous error } status = regexec(&re_posix_re, string, (size_t)0, NULL, 0); if (status != 0) { (void) regerror(status, &re_posix_re, re_error_str, sizeof(re_error_str)); #ifdef DEBUG_REGEX_POSIX cerr << "re_exec_posix: error = " << re_error_str <<endl; cerr << "re_exec_posix: string = " << string <<endl; #endif regfree(&re_posix_re); re_ready = false; return 0; } w_assert9 (status == 0); return 1; // found match }
/*
 * append adds pages to a large record implemented as
 * a set of chunks. It returns eFAILURE if the pages cannot be added
 * (implying that the record must be converted to indirect implmentation
 */
rc_t lg_tag_chunks_h::append(uint4_t num_pages, const lpid_t new_pages[])
{
    FUNC(lg_tag_chunks_h::append);
    int chunk_loc[max_chunks+1]; // marks location of chunks in the list of pages
                                 // +1 for marking end of chunks
    uint4_t chunk_count;

    // find the chunks in the list of pages
    chunk_loc[0] = 0; // first chunk is at 0
    chunk_count = 1;
    uint4_t i;
    // a new chunk begins wherever a page id breaks the contiguous run
    // that started at the previous chunk boundary
    for (i = 1; i<num_pages && chunk_count<=max_chunks; i++) {
        shpid_t curr_chunk_len = i - chunk_loc[chunk_count-1];
        if (new_pages[i].page-curr_chunk_len != new_pages[chunk_loc[chunk_count-1]].page) {
            // new chunk found
            chunk_loc[chunk_count] = i;
            chunk_count++;
        }
    }
    if (chunk_count > max_chunks) {
        // too many chunks
        return RC(smlevel_0::eBADAPPEND);
    }
    chunk_loc[chunk_count] = i; // remember end of chunks to simplify code

    /*
     * determine if 1st new chunk is contiguous with last page
     * in the large record
     */
    int contig = 0; // 1 indicates 1st new chunk is contiguous
    if (_cref.chunk_cnt == 0 || new_pages[0].page == _last_pid()+1) contig = 1;
    DBG(<<" chunk_count = " << chunk_count << " contig=" << contig);

    /*
     * See if there are too many chunks for the record type.
     * The contig variable must be ignored if _chunk_cnt == 0.
     */
    if ((chunk_count + (_cref.chunk_cnt>0 ? _cref.chunk_cnt - contig : 0)) > max_chunks) {
        // too many chunks
#if W_DEBUG_LEVEL > 4
        cerr << "too many chunks: " << chunk_count
             << " _cref.chunk_cnt " << _cref.chunk_cnt
             << " contig " << contig
             << " max_chunks " << int(max_chunks)
             << endl;
#endif
        return RC(smlevel_0::eBADAPPEND);
    }

    /*
     * update the chunk list with the new chunks
     * beware of a possibly contiguous first chunk
     */
    if (contig) {
        if (_cref.chunk_cnt == 0) {
            // empty record: fabricate a zero-length first chunk so the
            // contiguous-merge below has something to extend
            _cref.chunk_cnt = 1;
            _cref.chunks[0].first_pid = new_pages[0].page;
            _cref.chunks[0].npages = 0;
        }
        // fold the first new chunk into the existing last chunk
        _cref.chunks[_cref.chunk_cnt-1].npages += chunk_loc[1] - chunk_loc[0];
    }
    // copy the remaining (non-merged) chunks onto the end of the list
    for (i = contig; i < chunk_count; i++) {
        _cref.chunks[_cref.chunk_cnt-contig+i].first_pid = new_pages[chunk_loc[i]].page;
        _cref.chunks[_cref.chunk_cnt-contig+i].npages = chunk_loc[i+1] - chunk_loc[i];
        DBG(<<" Added to chunk " << i
            << " page " << _cref.chunks[_cref.chunk_cnt-contig+i].first_pid
            << " npages=" << _cref.chunks[_cref.chunk_cnt-contig+i].npages
            );
    }
    w_assert9((_cref.chunk_cnt + chunk_count - contig) <= max_chunks);
    _cref.chunk_cnt += w_base_t::uint2_t(chunk_count - contig);
    DBG(<<"ok");
    return RCOK;
}
istream &
w_base_t::_scan_uint8(
    istream& i,
    w_base_t::uint8_t &u8,
    bool chew_white, // true if coming from istream operator
    bool is_signed,  // if true, we have to return an error for overflow
    bool& range_err  // set to true if range error occurred
)
{
    // Hand-written FSM that parses an unsigned 64-bit integer from the
    // stream, honoring the stream's basefield (dec/oct/hex/unset) and
    // detecting overflow against precomputed thresholds.  On range
    // error, u8 is clamped to the appropriate min/max and the stream's
    // fail bit is set; on parse error the stream is rewound.
    w_base_t::uint8_t thresh=0,
        thresh2=0 /* thresh2, thresh3, thresh4 for decimal only */,
        thresh3=0, thresh4=0;
    w_base_t::uint8_t value = 0;
    bool negate = false;
    int e=0;
    int base=0;
    bool skip_white = true;
    states s = start;
    streampos tell_start = i.tellg(); // rewind point on error
    int chewamt = chew_white? 1 : 0;
    XTABLE *table=0;
    range_err = false;

    {
        // Get the base from the stream
        ios_fmtflags old = i.flags();
        skip_white = ((old & ios::skipws) != 0);
        switch(old & ios::basefield) {
            case 0:
                // base unstated: determined later by 0/0x prefix
                base = 0;
                table = &table_unknown;
                break;
            case ios::hex:
                base = 4; // shift by this
                table = &table_base16;
                thresh = is_signed? thresh_hex_signed : thresh_hex_unsigned;
                break;
            case ios::oct:
                base = 3; // shift by this
                table = &table_base8;
                thresh = is_signed? thresh_oct_signed : thresh_oct_unsigned;
                break;
            case ios::dec:
                base = 10; // multiply by this
                table = &table_base10;
                thresh = is_signed? thresh_dec_signed : thresh_dec_unsigned;
                thresh2 = is_signed? thresh2_dec_signed : thresh2_dec_unsigned;
                thresh3 = is_signed? (negate? 8: 7) : 5;
                thresh4 = is_signed? thresh_hex_signed : thresh_hex_unsigned;
                break;
            default:
                W_FATAL(fcINTERNAL);
                break;
        }
    }

    int ich;
    char ch;
    while (s < end) {
        ch = 0;
        // classify the next character into an FSM input event 'e'
        // if (i) {
            ich = i.get();
            if (ich != EOF) {
                ch = char(ich);
                /* By using isspace() we get locale-dependent behavior */
                if(isspace(ch)) {
                    e = white;
                } else {
                    e = equiv[unsigned(ch)];
                }
            } else e = eofile;
        // } else {
        //     e = eofile;
        // }

        /* transition table */
        s = (*table)[e][s];

        switch(s) {
        case start:
            /* Have seen leading white space */
            if(!skip_white) {
                s = end;
            }
            tell_start += chewamt; // rewind point moves past chewed space
            break;

        case sgned:
            if(ch == '-') {
                negate = true;
                // recompute decimal last-digit threshold for negation
                if(thresh3!=0) thresh3 = is_signed? (negate? 8: 7) : 5;
            }
            break;

        case leadz:
            /* Have seen 1 or more leading zeroes
             * if base is 0 (unstated), 0 or 0x will
             * determine the base.
             */
            break;

        case new_hex:
            /* State means we've seen [0][a-f] or 0[xX] */
            if(base && (base != 4)) {
                /* consider this the end of the string */
                IOS_BACK(i, ch);
                s = end;
                break;
            }
            w_assert9(base == 0 || base == 4);
            if((base == 0) && (e != exx)) {
                /* consider this the end of the string */
                IOS_BACK(i, ch);
                s = end;
                break;
            }
            /* at this point, in the 0[xX] case,
             * we WILL make a conversion,
             * if nothing else, it will be to 0. In event
             * of error (the char after the [Xx] is not
             * a legit hex digit) we have to be able to
             * seek back to where the [Xx] was, rather than
             * leave the endptr at the offending digit.
             */
            base = 4; // 2 ** base, i.e., shift amt
            if(e != exx) {
                IOS_BACK(i, ch);
            } else {
                /* XXX used to be tellg()-1, but no
                   streampos arith allowed that way. */
                tell_start = i.tellg();
                tell_start -= 1; // for possible error-handling
            }
            thresh = is_signed? thresh_hex_signed : thresh_hex_unsigned;
            break;

        case new_oct:
            /* State means we've seen 0 followed by [1-7] */
            if(base==0 || base == 3) {
                /* treat as oct # */
                base = 3; // shift amt
                thresh = is_signed? thresh_oct_signed : thresh_oct_unsigned;
            } else if(base == 10) {
                // digit is valid decimal too: reinterpret in base 10
                s = new_dec;
                thresh = is_signed? thresh_dec_signed : thresh_dec_unsigned;
                thresh2= is_signed?thresh2_dec_signed : thresh2_dec_unsigned;
                thresh3 = is_signed? (negate? 8: 7) : 5;
                thresh4 = is_signed? thresh_hex_signed : thresh_hex_unsigned;
            } else {
                w_assert9(base == 4);
                s = new_hex;
                thresh = is_signed? thresh_hex_signed : thresh_hex_unsigned;
            }
            IOS_BACK(i, ch);
            break;

        case new_dec:
            /* State means we've seen [1-9] in start/sgned state
             * or 0 followed by [8-9] */
            if(e == eight || e == nine) {
                if(base && base != 10) {
                    /* consider this the end of the string */
                    IOS_BACK(i, ch);
                    s = end;
                    break;
                }
            }
            if(base==0 || base == 10) {
                /* treat as dec # */
                base = 10; // multiply amt
                thresh = is_signed? thresh_dec_signed : thresh_dec_unsigned;
                thresh2= is_signed?thresh2_dec_signed : thresh2_dec_unsigned;
                thresh3 = is_signed? (negate? 8: 7) : 5;
                thresh4 = is_signed? thresh_hex_signed : thresh_hex_unsigned;
            } else if(base == 3) {
                s = new_oct;
                thresh = is_signed? thresh_oct_signed : thresh_oct_unsigned;
            } else {
                w_assert9(base == 4);
                thresh = is_signed? thresh_hex_signed : thresh_hex_unsigned;
                s = new_hex;
            }
            IOS_BACK(i, ch);
            break;

        case is_hex:
            w_assert9(base == 4);
            /* drop down */
        case is_oct:
            // power-of-two base: overflow iff high bits already occupied
            if(value & thresh) {
                range_err = true;
                // keep parsing
                // s = end;
                break;
            }
            /* shift */
            value <<= base;
            value += int(e);
            break;

        case is_dec:
            w_assert9(base == 10);
            if(value & thresh4) {
                // value is large: check carefully before multiply/add
                if(value > thresh2) {
                    /* will overflow on multiply */
                    range_err = true;
                    // keep parsing
                    // s = end;
                    break;
                }
                value *= base;
                if((value - thresh2) + unsigned(e) > thresh3) {
                    /* overflow adding in e */
                    range_err = true;
                    // keep parsing
                    // s = end;
                    break;
                }
            } else {
                /* multiply */
                value *= base;
            }
            value += unsigned(e);
            break;

        case error:
            // bad input: fail the stream and rewind to the start
            IOS_FAIL(i);
            i.seekg(tell_start);
            s = end;
            break;

        case no_hex:
            // "0x" with no following hex digit: rewind to the x
            i.seekg(tell_start);
            s = end;
            break;

        case end:
            // done: push back the first character past the number
            IOS_BACK(i, ch);
            break;

        case no_state:
            W_FATAL(fcINTERNAL);
            break;
        }
    }

    if(range_err) {
        // don't seek to start: clamp result and fail the stream
        u8 = negate ?
            ( is_signed? w_base_t::int8_min : w_base_t::uint8_max) :
            ( is_signed? w_base_t::int8_max : w_base_t::uint8_max);
        IOS_FAIL(i);
    } else {
        u8 = negate ? (0 - value) : value;
    }
    return i;
}
/*
 * truncate() removes pages at the end of large records
 * implemented as indirect blocks
 */
rc_t lg_tag_indirect_h::truncate(uint4_t num_pages)
{
    FUNC(lg_tag_indirect_h::truncate);
    // Free the last num_pages data pages, then remove/trim indirect
    // pages as needed, possibly collapsing a 2-level tree to 1 level
    // or removing the root entirely.
    int i;
    int first_dealloc = (int)(_page_cnt-num_pages);
    int last_dealloc = (int)(_page_cnt-1);
    recflags_t rec_type = indirect_type(_page_cnt);

    // free the data pages themselves
    for (i = first_dealloc; i <= last_dealloc; i++) {
        W_DO(smlevel_0::io->free_page(pid(i)));
    }

    int indirect_rm_count = 0;   // # indirect pages to remove
    int pids_rm_by_indirect = 0; // # data pids removed
                                 // by removing indirect pages
    int pids_to_rm = 0;          // # of pids to rm from last indirect

    // indirect pages are only removed if t_large_2
    if (rec_type == t_large_2) {
        uint pids_on_last = _pages_on_last_indirect();
        if (pids_on_last > num_pages) {
            indirect_rm_count = 0;
        } else {
            indirect_rm_count = (num_pages-1)/lgindex_p::max_pids+1;
            pids_rm_by_indirect = pids_on_last+(indirect_rm_count-1)*lgindex_p::max_pids;
        }
    }
    pids_to_rm = num_pages-pids_rm_by_indirect;

    // remove any indirect pages we can
    if (indirect_rm_count > 0) {
        lpid_t root_pid(stid(), _iref.indirect_root);
        lgindex_p root;
        W_DO( root.fix(root_pid, LATCH_EX) );
        w_assert1(root.is_fixed());

        // first deallocate the indirect pages
        w_assert9(root.pid_count() == _page_cnt/lgindex_p::max_pids + 1);
        first_dealloc = root.pid_count()-indirect_rm_count;
        last_dealloc = root.pid_count()-1;
        for (i = first_dealloc; i <= last_dealloc; i++) {
            lpid_t pid_to_free(stid(), root.pids(i));
            W_DO(smlevel_0::io->free_page(pid_to_free));
        }

        // if will be only one indirect page left, then remember
        // the ID of this page
        /*
        shpid_t new_indirect_root;
        if (indirect_type(_page_cnt-num_pages) == t_large_1) {
            new_indirect_root = root->pids(0);
        }
        */
        W_DO(root.truncate(indirect_rm_count));

        // if there is only one indirect page left, then switch to
        // a single level tree
        if (indirect_type(_page_cnt-num_pages) == t_large_1) {
            w_assert9( ((_page_cnt-1)/lgindex_p::max_pids+1) -
                       indirect_rm_count == 0);
            if (root.pid_count() > 0) {
                // there is at least one page left in the record,
                // the first page in the root becomes the new root
                _iref.indirect_root = root.pids(0);
            } else {
                // all pages have been removed, so there is no root
                _iref.indirect_root = 0;
            }
            // unfix before freeing the old root page
            root.unfix();
            W_DO(smlevel_0::io->free_page(root_pid));
        }
    }

    // if we have not removed all pages, we must truncate pids
    // from the last indirect page
    if (_page_cnt > num_pages) {
        // get the pid for the last indirect page before truncate point
        // temporarily lower _page_cnt
        _page_cnt -= num_pages;
        lpid_t last_index_pid(stid(), _last_indirect());
        _page_cnt += num_pages;
        lgindex_p last_index;
        W_DO( last_index.fix(last_index_pid, LATCH_EX) );
        w_assert1(last_index.is_fixed());
        W_DO(last_index.truncate(pids_to_rm));
    } else {
        w_assert9(_page_cnt == num_pages);
        // we have removed all data pages, so remove the root
        // (assuming it is not 0 meaning it has already been removed)
        if (_iref.indirect_root != 0) {
            lpid_t root_pid(stid(), _iref.indirect_root);
            W_DO(smlevel_0::io->free_page(root_pid));
            _iref.indirect_root = 0; // mark that there is no root
        }
    }
    return RCOK;
}