ErrorStack MasstreeStoragePimpl::verify_single_thread(thread::Thread* context) {
  MasstreeIntermediatePage* layer_root;
  WRAP_ERROR_CODE(get_first_root(context, false, &layer_root));
  CHECK_AND_ASSERT(!layer_root->is_border());  // root of first layer is always intermediate page
  CHECK_ERROR(verify_single_thread_layer(context, 0, layer_root));
  return kRetOk;
}
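// CHECK_AND_ASSERT is defined locally by the verification code and is not shown
// here. A plausible sketch of such a macro (an assumption; the exact error code
// name may differ): assert in debug builds, and return a verification error
// instead of crashing in release builds.
#define CHECK_AND_ASSERT(x) \
  do { \
    ASSERT_ND(x); \
    if (!(x)) { \
      return ERROR_STACK(kErrorCodeStrMasstreeFailedVerification); \
    } \
  } while (0)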
///===============Follow a link in the page based on the name=======================///
void Browser::follow_link(std::string name_of_link_to_follow, int usertimeout = 20) {
  if (links.empty()) {
    std::cerr << "\n[!] No links found\n";
    return;
  }
  std::string to_follow = "";
  for (size_t i = 0; i < links.size(); ++i) {
    if (links[i].name() == name_of_link_to_follow) {
      to_follow = links[i].url();
      break;
    }
  }
  if (to_follow.empty()) {
    std::cerr << "\n[!] No such link found in the page\n";
    return;
  }
  // If the link is already an absolute URL we open it directly;
  // otherwise we resolve it against the site root.
  if (word_in(to_follow, "http://") || word_in(to_follow, "https://")) {
    open(to_follow, usertimeout);
  } else {
    std::string now_on = get_first_root();  // site root, ending with a slash
    if (to_follow[0] != '/') {
      open(now_on + to_follow, usertimeout);
    } else {
      // drop the root's trailing slash to avoid a double slash
      open(now_on.substr(0, now_on.size() - 1) + to_follow, usertimeout);
    }
  }
}
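// A minimal, hypothetical usage sketch. The Browser construction and the open()
// signature are assumptions, not shown in this snippet.
int main() {
  Browser b;
  b.open("http://example.com/", 20);  // assumed to fetch the page and populate links
  b.follow_link("Contact", 10);       // follows the link named "Contact", relative or absolute
  return 0;
}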
ErrorStack MasstreeStoragePimpl::fatify_first_root(
  thread::Thread* context,
  uint32_t desired_count) {
  LOG(INFO) << "Masstree-" << get_name() << " being fatified for " << desired_count;

  if (desired_count > kMaxIntermediatePointers) {
    LOG(INFO) << "desired_count too large. adjusted to the max";
    desired_count = kMaxIntermediatePointers;
  }

  // Check if the volatile page is moved. If so, grow it.
  while (true) {
    MasstreeIntermediatePage* root;
    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    if (root->has_foster_child()) {
      // oh, the root page needs to grow
      LOG(INFO) << "oh, the root page needs to grow";
      WRAP_ERROR_CODE(grow_root(
        context,
        &get_first_root_pointer(),
        &get_first_root_owner(),
        &root));
      // then retry
    } else {
      break;
    }
  }

  while (true) {
    // lock the first root.
    xct::McsLockScope owner_scope(context, &get_first_root_owner());
    LOG(INFO) << "Locked the root page owner address.";
    MasstreeIntermediatePage* root;
    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    PageVersionLockScope scope(context, root->get_version_address());
    LOG(INFO) << "Locked the root page itself.";
    if (root->has_foster_child()) {
      LOG(WARNING) << "Mm, I thought I grew the root, but concurrent xct again moved it. "
        << " Gave up fatifying. Should be super-rare.";
      return kRetOk;
    }
    ASSERT_ND(root->is_locked());
    ASSERT_ND(!root->is_moved());
    uint32_t current_count = count_children(root);
    LOG(INFO) << "Masstree-" << get_name() << " currently has " << current_count << " children";

    if (current_count >= desired_count || current_count >= (kMaxIntermediatePointers / 2U)) {
      LOG(INFO) << "Already enough fat. Done";
      break;
    }

    LOG(INFO) << "Splitting...";
    CHECK_ERROR(fatify_first_root_double(context));

    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    uint32_t new_count = count_children(root);
    if (new_count == current_count) {
      LOG(INFO) << "Seems like we can't split any more.";
      break;
    }
  }

  return kRetOk;
}
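// A hedged usage sketch (the surrounding engine/session plumbing and the helper
// name are assumptions, not from this snippet): pre-fatten the first root so a
// subsequent parallel bulk load sees many direct children to partition across
// threads.
ErrorStack prefatten_for_load(thread::Thread* context, MasstreeStoragePimpl* masstree) {
  const uint32_t kDesiredFanout = 64;  // hypothetical target child count
  CHECK_ERROR(masstree->fatify_first_root(context, kDesiredFanout));
  return kRetOk;
}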
ErrorStack MasstreeStoragePimpl::fatify_first_root_double(thread::Thread* context) {
  MasstreeIntermediatePage* root;
  WRAP_ERROR_CODE(get_first_root(context, true, &root));
  ASSERT_ND(root->is_locked());
  ASSERT_ND(!root->is_moved());

  // assure that all children have volatile version
  for (MasstreeIntermediatePointerIterator it(root); it.is_valid(); it.next()) {
    if (it.get_pointer().volatile_pointer_.is_null()) {
      MasstreePage* child;
      WRAP_ERROR_CODE(follow_page(
        context,
        true,
        const_cast<DualPagePointer*>(&it.get_pointer()),
        &child));
    }
    ASSERT_ND(!it.get_pointer().volatile_pointer_.is_null());
  }

  std::vector<Child> original_children = list_children(root);
  ASSERT_ND(original_children.size() * 2U <= kMaxIntermediatePointers);
  std::vector<Child> new_children;
  for (const Child& child : original_children) {
    CHECK_ERROR(split_a_child(context, root, child, &new_children));
  }
  ASSERT_ND(new_children.size() >= original_children.size());

  memory::NumaCoreMemory* memory = context->get_thread_memory();
  memory::PagePoolOffset new_offset = memory->grab_free_volatile_page();
  if (new_offset == 0) {
    return ERROR_STACK(kErrorCodeMemoryNoFreePages);
  }
  // from now on no failure (we grabbed a free page).
  VolatilePagePointer new_pointer = combine_volatile_page_pointer(
    context->get_numa_node(),
    kVolatilePointerFlagSwappable,  // pointer to root page might be swapped!
    get_first_root_pointer().volatile_pointer_.components.mod_count + 1,
    new_offset);
  MasstreeIntermediatePage* new_root
    = context->resolve_newpage_cast<MasstreeIntermediatePage>(new_pointer);
  new_root->initialize_volatile_page(
    get_id(),
    new_pointer,
    0,
    root->get_btree_level(),  // same as current root. this is not grow_root
    kInfimumSlice,
    kSupremumSlice);

  // no concurrent access to the new page, but just for the sake of assertion in the func.
  PageVersionLockScope new_scope(context, new_root->get_version_address());
  new_root->split_foster_migrate_records_new_first_root(&new_children);
  ASSERT_ND(count_children(new_root) == new_children.size());
  verify_new_root(context, new_root, new_children);

  // set the new first-root pointer.
  assorted::memory_fence_release();
  get_first_root_pointer().volatile_pointer_.word = new_pointer.word;
  // first-root snapshot pointer is unchanged.

  // old root page and the direct children are now retired
  assorted::memory_fence_acq_rel();
  root->set_moved();  // not quite moved, but assertions assume that.
  root->set_retired();
  context->collect_retired_volatile_page(
    construct_volatile_page_pointer(root->header().page_id_));
  for (const Child& child : original_children) {
    MasstreePage* original_page = context->resolve_cast<MasstreePage>(child.pointer_);
    if (original_page->is_moved()) {
      PageVersionLockScope scope(context, original_page->get_version_address());
      original_page->set_retired();
      context->collect_retired_volatile_page(child.pointer_);
    } else {
      // This means the page had records too small to split. We must keep it.
    }
  }
  assorted::memory_fence_acq_rel();

  LOG(INFO) << "Split done. " << original_children.size() << " -> " << new_children.size();
  return kRetOk;
}
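// A minimal, generic C++ sketch (not FOEDUS code; all names hypothetical) of the
// publish pattern the function above relies on: fully build the new root, then
// release-publish the pointer swap so concurrent readers observe either the old
// root or the completely initialized new one, never a half-built page.
#include <atomic>

struct Page { int payload; };

std::atomic<Page*> g_root{nullptr};

void publish_new_root(Page* new_root) {
  // all prior writes to *new_root happen-before this store
  g_root.store(new_root, std::memory_order_release);
}

Page* read_root() {
  // pairs with the release store above, so the page is seen fully initialized
  return g_root.load(std::memory_order_acquire);
}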