Example No. 1
static void
list_root (IAnjutaProject *project, AnjutaProjectNode *root)
{
	print ("ROOT (%s): %s", no_id ? "" : "0", anjuta_project_node_get_name (root));
	list_property (project, root, 1);
	list_children (project, root, root, 0, "0");
}
Example No. 2
static void
list_module (IAnjutaProject *project, AnjutaProjectNode *root, AnjutaProjectNode *module, gint indent, const gchar *path)
{
	print ("%*sMODULE (%s): %s", indent * INDENT, "", no_id ? "" : path, anjuta_project_node_get_name (module));

	list_property (project, module, indent + 1);

	list_children (project, root, module, indent, path);
}
Example No. 3
static void
list_target (IAnjutaProject *project, AnjutaProjectNode *root, AnjutaProjectNode *target, gint indent, const gchar *path)
{
	print ("%*sTARGET (%s): %s", indent * INDENT, "", no_id ? "" : path, anjuta_project_node_get_name (target));

	list_property (project, target, indent + 1);

	list_children (project, root, target, indent, path);
}
Example No. 4
static void
list_group (IAnjutaProject *project, AnjutaProjectNode *root, AnjutaProjectNode *group, gint indent, const gchar *path)
{
	AnjutaProjectNode *parent;
	gchar *rel_path;
	GFile *file;

	parent = anjuta_project_node_parent (group);
	file = anjuta_project_node_get_file (parent);
	rel_path = g_file_get_relative_path (file, anjuta_project_node_get_file (group));
	print ("%*sGROUP (%s): %s", indent * INDENT, "", no_id ? "" : path, rel_path);
	g_free (rel_path);

	list_property (project, group, indent + 1);

	list_children (project, root, group, indent, path);
}
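
The four examples above call a handful of helpers and globals that are not part of the excerpt (print, no_id, INDENT, list_property, list_children). Below is a minimal sketch of the declarations they assume; the names come from the call sites, while the exact prototypes, values, and comments are assumptions and may differ from the real Anjuta test code.

/* Sketch of the declarations the examples above rely on (assumed, inferred
 * from the call sites; the actual Anjuta sources may declare them differently). */
#define INDENT 4                        /* assumed: spaces of indentation per tree level */

static gboolean no_id = FALSE;          /* assumed: when TRUE, node ids are omitted from the output */

/* printf-style output helper used by every example */
static void print (const gchar *message, ...);

/* prints the properties of a node at the given indentation level */
static void list_property (IAnjutaProject *project, AnjutaProjectNode *parent, gint indent);

/* recursively prints the children of a node, presumably dispatching to
 * list_module/list_target/list_group depending on the node type */
static void list_children (IAnjutaProject *project, AnjutaProjectNode *root, AnjutaProjectNode *parent, gint indent, const gchar *path);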
ErrorStack MasstreeStoragePimpl::fatify_first_root_double(thread::Thread* context) {
  MasstreeIntermediatePage* root;
  WRAP_ERROR_CODE(get_first_root(context, true, &root));
  ASSERT_ND(root->is_locked());
  ASSERT_ND(!root->is_moved());

  // ensure that all children have a volatile version
  for (MasstreeIntermediatePointerIterator it(root); it.is_valid(); it.next()) {
    if (it.get_pointer().volatile_pointer_.is_null()) {
      MasstreePage* child;
      WRAP_ERROR_CODE(follow_page(
        context,
        true,
        const_cast<DualPagePointer*>(&it.get_pointer()),
        &child));
    }
    ASSERT_ND(!it.get_pointer().volatile_pointer_.is_null());
  }

  std::vector<Child> original_children = list_children(root);
  ASSERT_ND(original_children.size() * 2U <= kMaxIntermediatePointers);
  std::vector<Child> new_children;
  for (const Child& child : original_children) {
    CHECK_ERROR(split_a_child(context, root, child, &new_children));
  }
  ASSERT_ND(new_children.size() >= original_children.size());

  memory::NumaCoreMemory* memory = context->get_thread_memory();
  memory::PagePoolOffset new_offset = memory->grab_free_volatile_page();
  if (new_offset == 0) {
    return ERROR_STACK(kErrorCodeMemoryNoFreePages);
  }
  // From now on there is no failure path (we have already grabbed a free page).

  VolatilePagePointer new_pointer = combine_volatile_page_pointer(
    context->get_numa_node(),
    kVolatilePointerFlagSwappable,  // pointer to root page might be swapped!
    get_first_root_pointer().volatile_pointer_.components.mod_count + 1,
    new_offset);
  MasstreeIntermediatePage* new_root
    = context->resolve_newpage_cast<MasstreeIntermediatePage>(new_pointer);
  new_root->initialize_volatile_page(
    get_id(),
    new_pointer,
    0,
    root->get_btree_level(),  // same level as the current root; this is not grow_root
    kInfimumSlice,
    kSupremumSlice);
  // No concurrent access to the new page; we lock it only to satisfy assertions in the calls below.
  PageVersionLockScope new_scope(context, new_root->get_version_address());
  new_root->split_foster_migrate_records_new_first_root(&new_children);
  ASSERT_ND(count_children(new_root) == new_children.size());
  verify_new_root(context, new_root, new_children);

  // set the new first-root pointer.
  assorted::memory_fence_release();
  get_first_root_pointer().volatile_pointer_.word = new_pointer.word;
  // first-root snapshot pointer is unchanged.

  // old root page and the direct children are now retired
  assorted::memory_fence_acq_rel();
  root->set_moved();  // not quite moved, but assertions assume that.
  root->set_retired();
  context->collect_retired_volatile_page(
    construct_volatile_page_pointer(root->header().page_id_));
  for (const Child& child : original_children) {
    MasstreePage* original_page = context->resolve_cast<MasstreePage>(child.pointer_);
    if (original_page->is_moved()) {
      PageVersionLockScope scope(context, original_page->get_version_address());
      original_page->set_retired();
      context->collect_retired_volatile_page(child.pointer_);
    } else {
      // This means the page's records were too small to split. We must keep it.
    }
  }
  assorted::memory_fence_acq_rel();

  LOG(INFO) << "Split done. " << original_children.size() << " -> " << new_children.size();

  return kRetOk;
}
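
For reference, the routine above leans on several file-local helpers (Child, list_children, split_a_child, count_children, verify_new_root); this list_children is unrelated to the Anjuta helper of the same name in the earlier examples. The declarations below are only a sketch inferred from the call sites; apart from Child::pointer_, which the code dereferences directly, the shapes and return types are assumptions rather than the actual FOEDUS definitions.

// Sketch (assumed, inferred from the calls above; not the actual FOEDUS declarations).
struct Child {
  VolatilePagePointer pointer_;  // volatile pointer to the child page (used as child.pointer_ above)
  // The real struct presumably also records the child's fence slices/position,
  // which this excerpt never touches.
};

// Collects the direct children of the current first root.
std::vector<Child> list_children(MasstreeIntermediatePage* root);

// Splits one child (when its records allow it) and appends the resulting
// page(s) to *new_children; returns an ErrorStack because it is checked
// with CHECK_ERROR above.
ErrorStack split_a_child(
  thread::Thread* context,
  MasstreeIntermediatePage* root,
  const Child& child,
  std::vector<Child>* new_children);

// Counts the pointers installed in the new root (used only in an assertion).
uint32_t count_children(MasstreeIntermediatePage* new_root);

// Debug-only sanity check of the freshly built root against new_children.
void verify_new_root(
  thread::Thread* context,
  MasstreeIntermediatePage* new_root,
  const std::vector<Child>& new_children);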