//pos start from 1 // void extend_rule(struct mc *m, struct mb_node_v6 *node, uint32_t pos, int level, struct next_hop_info *nhi, int use_mm) { int child_num = count_children(node->external); int rule_num = count_children(node->internal) - 1; struct next_hop_info **i; struct next_hop_info **j; void *n = new_node(m, child_num, rule_num + 1, level, use_mm); if (child_num != 0){ //copy the child memcpy(n,node->child_ptr,sizeof(struct mb_node_v6)*UP_CHILD(child_num)); #ifdef UP_STATS if (st_f){ node_cpy += UP_CHILD(child_num); } #endif } #ifdef UP_STATS if (st_f) { node_alloc++; } #endif //insert the rule at the pos position if (rule_num != 0) { //copy the 1~pos-1 part i = (struct next_hop_info**)node->child_ptr - pos + 1; j = (struct next_hop_info**)n - pos + 1; memcpy(j,i,(pos-1)*sizeof(struct next_hop_info*)); //copy the pos~rule_num part i = (struct next_hop_info**)node->child_ptr - rule_num; j = (struct next_hop_info**)n - rule_num - 1; memcpy(j,i,(rule_num - pos + 1)*sizeof(struct next_hop_info*)); #ifdef UP_STATS if (st_f) { node_cpy += UP_RULE(rule_num); } #endif } i = (struct next_hop_info**)n - pos; *i = nhi; //need to be atomic if (node->child_ptr) { free_node(m, POINT(node->child_ptr) - UP_RULE(rule_num), UP_CHILD(child_num) + UP_RULE(rule_num), level, use_mm); } node->child_ptr = n; }
// Remove the rule at position `pos` (pos starts from 1) from `node`'s
// rule array.  Mirror image of extend_rule: allocates a buffer with one
// fewer rule slot, copies children verbatim and the rule array in two
// halves around the removed entry, then frees the old buffer.
void reduce_rule(struct mc *m, struct mb_node_v6 *node, uint32_t pos, int level, int use_mm)
{
    int child_num = count_children(node->external);
    int rule_num = count_children(node->internal);
    struct next_hop_info **i;
    struct next_hop_info **j;

    // A node with no rules cannot have one removed.
    if (rule_num < 1){
        printf("reduce_rule: error!\n");
        return;
    }

    void *n = new_node(m, child_num, rule_num - 1, level, use_mm);

    if (child_num != 0){
        // copy the child array (unchanged by a rule removal)
        memcpy(n,node->child_ptr,sizeof(struct mb_node_v6)*UP_CHILD(child_num));
#ifdef UP_STATS
        if(st_f) node_cpy += UP_CHILD(child_num);
#endif
    }

    // delete the rule at the pos position
    if (rule_num > 1) {
        // copy the 1~pos-1 part (rules above the removed entry)
        i = (struct next_hop_info**)node->child_ptr - pos + 1;
        j = (struct next_hop_info**)n - pos + 1;
        memcpy(j,i,(pos-1)*sizeof(struct next_hop_info*));

        // copy the pos~rule_num part, shifted up by one to close the gap
        i = (struct next_hop_info**)node->child_ptr - rule_num;
        j = (struct next_hop_info**)n - rule_num + 1;
        memcpy(j,i,(rule_num - pos)*sizeof(struct next_hop_info*));
#ifdef UP_STATS
        if(st_f) node_cpy += UP_RULE(rule_num);
#endif
    }
#ifdef UP_STATS
    if(st_f) node_alloc++;
#endif

    // need to be atomic
    // NOTE(review): original comment -- see extend_rule; the old buffer is
    // reclaimed while readers may still reference it.
    if (node->child_ptr) {
        free_node(m, POINT(node->child_ptr) - UP_RULE(rule_num), UP_CHILD(child_num) + UP_RULE(rule_num), level, use_mm);
    }
    node->child_ptr = n;
}
void reduce_child(struct mc *m, struct mb_node_v6 *node, int pos, int level, int use_mm) { int child_num = count_children(node->external); int rule_num = count_children(node->internal); if (child_num < 1){ printf("reduce_rule: error!\n"); } void *n = new_node(m, child_num -1 ,rule_num, level, use_mm); struct next_hop_info **i; struct next_hop_info **j; if (rule_num != 0) { //copy the rules i = (struct next_hop_info **)node->child_ptr - rule_num; j = (struct next_hop_info **)n - rule_num; memcpy(j,i,(rule_num)*sizeof(struct next_hop_info*)); #ifdef UP_STATS if (st_f) node_cpy += UP_RULE(rule_num); #endif } if (child_num > 1) { //copy the 0~pos-1 part memcpy(n,node->child_ptr,(pos)*sizeof(struct mb_node_v6)); //copy the pos+1~child_num part to pos~child_num-1 memcpy((struct mb_node_v6*)n+pos,(struct mb_node_v6*)node->child_ptr + pos + 1, (child_num - pos -1) * sizeof(struct mb_node_v6)); #ifdef UP_STATS if(st_f) node_cpy += UP_CHILD(child_num); #endif } #ifdef UP_STATS if(st_f) node_alloc++; #endif //need to be atomic if (node->child_ptr) { free_node(m, POINT(node->child_ptr) - UP_RULE(rule_num), UP_CHILD(child_num) + UP_RULE(rule_num), level, use_mm); } node->child_ptr = n; }
bool MatrixGraph::readXml(const char* path) { char* data = nullptr; xml_document<> doc; try { file<> xmlFile(path); data = new char[xmlFile.size()]; memcpy(data, xmlFile.data(), xmlFile.size() * sizeof(char)); } catch (runtime_error e) { cout << "Nie mozna otworzyc pliku: " << path << endl; if (data) delete[] data; cin.get(); cin.ignore(); return false; } bool retVal = false; doc.parse<0>(data); xml_node<>* node = doc.first_node("travellingSalesmanProblemInstance"); if (node) { node = node->first_node("graph"); if (node) { uint vNum = count_children(node); reserve(vNum); node = node->first_node("vertex"); for (uint vertex = 0; node != nullptr; node = node->next_sibling("vertex"), ++vertex) { xml_node<>* edge = node->first_node("edge"); for (; edge != nullptr; edge = edge->next_sibling("edge")) { matrix[vertex][atoi(edge->value())] = int(atof(edge->first_attribute("cost")->value())); retVal = true; } } for (uint i = 0; i < vertexNumber; i++) { matrix[i][i] = -1; } } } if (data) delete[] data; return retVal; }
void mem_subtrie(struct mb_node_v6 *n, struct mem_stats_v6 *ms) { int stride; int pos; struct mb_node_v6 *next; int child_num = count_children(n->external); int rule_num = count_children(n->internal); ms->mem += (UP_RULE(rule_num) + UP_CHILD(child_num)) * NODE_SIZE; ms->node += (UP_RULE(rule_num) + UP_CHILD(child_num)); for (stride = 0; stride < (1<<STRIDE); stride ++ ){ pos = count_enl_bitmap(stride); if (test_bitmap(n->external, pos)) { next = (struct mb_node_v6 *)n->child_ptr + count_ones(n->external, pos); mem_subtrie(next, ms); } } }
// Sanity-check a freshly built first-root intermediate page against the
// list of children it is supposed to reference.  Each child's pointer,
// low key, and high key must match the corresponding iterator entry, and
// the resolved page's fence keys must agree as well.
void verify_new_root(
  thread::Thread* context,
  MasstreeIntermediatePage* new_root,
  const std::vector<Child>& new_children) {
  // this verification runs even in release mode. we must be super careful on root page.
  // (hence each ASSERT_ND is doubled with an explicit LOG(FATAL) check)
  uint32_t count = new_children.size();
  uint32_t actual_count = count_children(new_root);
  ASSERT_ND(actual_count == count);
  if (actual_count != count) {
    LOG(FATAL) << "Child count doesn't match! expected=" << count << ", actual=" << actual_count;
  }
  ASSERT_ND(!new_root->is_border());
  ASSERT_ND(new_root->get_layer() == 0);
  // first root must cover the entire key space
  ASSERT_ND(new_root->get_low_fence() == kInfimumSlice);
  ASSERT_ND(new_root->is_high_fence_supremum());
  uint32_t cur = 0;
  for (MasstreeIntermediatePointerIterator it(new_root); it.is_valid(); it.next()) {
    // a just-built root must not have snapshot pointers or null children
    ASSERT_ND(it.get_pointer().snapshot_pointer_ == 0);
    VolatilePagePointer pointer = it.get_pointer().volatile_pointer_;
    ASSERT_ND(!pointer.is_null());
    if (pointer.is_null()) {
      LOG(FATAL) << "Nullptr? wtf";
    }
    // each slot must match the expected child, in order
    const Child& child = new_children[cur];
    ASSERT_ND(pointer.is_equivalent(child.pointer_));
    ASSERT_ND(it.get_low_key() == child.low_);
    ASSERT_ND(it.get_high_key() == child.high_);
    if (!pointer.is_equivalent(child.pointer_) || it.get_low_key() != child.low_ || it.get_high_key() != child.high_) {
      LOG(FATAL) << "Separator or pointer does not match!";
    }
    // cross-check against the actual page's own fence keys
    MasstreePage* page = context->resolve_cast<MasstreePage>(pointer);
    ASSERT_ND(page->get_low_fence() == child.low_);
    ASSERT_ND(page->get_high_fence() == child.high_);
    if (page->get_low_fence() != child.low_ || page->get_high_fence() != child.high_) {
      LOG(FATAL) << "Fence key doesnt match!";
    }
    ++cur;
  }
  ASSERT_ND(cur == count);
}
/* Recursively count every descendant row of @parent in @model
 * (each direct child contributes itself plus its own subtree). */
static guint
count_children (GtkTreeModel *model,
                GtkTreeIter  *parent)
{
  GtkTreeIter child;
  guint total = 0;
  gboolean more;

  more = gtk_tree_model_iter_children (model, &child, parent);
  while (more)
    {
      total += 1 + count_children (model, &child);
      more = gtk_tree_model_iter_next (model, &child);
    }

  return total;
}
/**********************************************************************
 * OL_BUCKETS::count_children
 *
 * Recursively count the child outlines of `outline` found in the
 * bucket grid, weighting grandchildren by
 * edges_children_per_grandchild.  Returns early with a value greater
 * than max_count as soon as the parent can be rejected, either by
 * sheer child count or by the "boxy parent" heuristics below.
 **********************************************************************/
inT32 OL_BUCKETS::count_children(                     // recursive count
                                 C_OUTLINE *outline,  // parent outline
                                 inT32 max_count      // max output
                                ) {
  BOOL8 parent_box;              // could it be boxy
  inT16 xmin, xmax;              // coord limits
  inT16 ymin, ymax;
  inT16 xindex, yindex;          // current bucket
  C_OUTLINE *child;              // current child
  inT32 child_count;             // no of children
  inT32 grandchild_count;        // no of grandchildren
  inT32 parent_area;             // potential box
  FLOAT32 max_parent_area;       // potential box
  inT32 child_area;              // current child
  inT32 child_length;            // current child
  TBOX olbox;
  C_OUTLINE_IT child_it;         // search iterator

  // Restrict the search to buckets overlapping the parent's bounding box.
  olbox = outline->bounding_box();
  xmin =(olbox.left() - bl.x()) / BUCKETSIZE;
  xmax =(olbox.right() - bl.x()) / BUCKETSIZE;
  ymin =(olbox.bottom() - bl.y()) / BUCKETSIZE;
  ymax =(olbox.top() - bl.y()) / BUCKETSIZE;
  child_count = 0;
  grandchild_count = 0;
  parent_area = 0;               // computed lazily on first child hit
  max_parent_area = 0;
  parent_box = TRUE;
  for (yindex = ymin; yindex <= ymax; yindex++) {
    for (xindex = xmin; xindex <= xmax; xindex++) {
      child_it.set_to_list(&buckets[yindex * bxdim + xindex]);
      if (child_it.empty())
        continue;
      for (child_it.mark_cycle_pt(); !child_it.cycled_list();
           child_it.forward()) {
        child = child_it.data();
        // operator< presumably tests geometric containment -- a child is
        // any other outline "inside" the parent.
        if (child != outline && *child < *outline) {
          child_count++;
          if (child_count <= max_count) {
            // Budget the grandchild recursion so the weighted total
            // cannot blow past max_count unnoticed.
            int max_grand =(max_count - child_count) /
                            edges_children_per_grandchild;
            if (max_grand > 0)
              grandchild_count += count_children(child, max_grand) *
                                  edges_children_per_grandchild;
            else
              grandchild_count += count_children(child, 1);
          }
          if (child_count + grandchild_count > max_count) {
            if (edges_debug)
              tprintf("Discarding parent with child count=%d, gc=%d\n",
                      child_count,grandchild_count);
            return child_count + grandchild_count;  // too complex: reject
          }
          if (parent_area == 0) {
            // First child seen: decide whether the parent could be a
            // filled box (area close to its bounding box area).
            parent_area = outline->outer_area();
            if (parent_area < 0)
              parent_area = -parent_area;
            max_parent_area = outline->bounding_box().area() * edges_boxarea;
            if (parent_area < max_parent_area)
              parent_box = FALSE;
          }
          if (parent_box &&
              (!edges_children_fix ||
               child->bounding_box().height() > edges_min_nonhole)) {
            child_area = child->outer_area();
            if (child_area < 0)
              child_area = -child_area;
            if (edges_children_fix) {
              // Hole too big relative to the box: parent isn't boxy.
              if (parent_area - child_area < max_parent_area) {
                parent_box = FALSE;
                continue;
              }
              if (grandchild_count > 0) {
                if (edges_debug)
                  tprintf("Discarding parent of area %d, child area=%d, max%g "
                          "with gc=%d\n",
                          parent_area, child_area, max_parent_area,
                          grandchild_count);
                return max_count + 1;  // boxy parent with grandchildren: reject
              }
              // Long, thin children inside a box suggest broken edges.
              child_length = child->pathlength();
              if (child_length * child_length >
                  child_area * edges_patharea_ratio) {
                if (edges_debug)
                  tprintf("Discarding parent of area %d, child area=%d, max%g "
                          "with child length=%d\n",
                          parent_area, child_area, max_parent_area,
                          child_length);
                return max_count + 1;
              }
            }
            // Sparse child (small area vs its own bounding box): reject.
            if (child_area < child->bounding_box().area() * edges_childarea) {
              if (edges_debug)
                tprintf("Discarding parent of area %d, child area=%d, max%g "
                        "with child rect=%d\n",
                        parent_area, child_area, max_parent_area,
                        child->bounding_box().area());
              return max_count + 1;
            }
          }
        }
      }
    }
  }
  return child_count + grandchild_count;
}
/* Record the expected row count @i on @treeview's object data,
 * after asserting it matches the model's actual recursive row count. */
static void
set_rows (GtkTreeView *treeview,
          guint        i)
{
  GtkTreeModel *model;

  model = gtk_tree_view_get_model (treeview);
  g_assert (i == count_children (model, NULL));
  g_object_set_data (G_OBJECT (treeview), "rows", GUINT_TO_POINTER (i));
}
void destroy_subtrie(struct mb_node_v6 *node, void (*destroy_nhi)(struct next_hop_info *nhi), struct mc *m, int depth, int use_mm) #endif { int bit; int cidr; int pos; struct next_hop_info ** nhi = NULL; int stride; struct mb_node_v6 *next = NULL; int cnt_rules; struct mb_node_v6 *first = NULL; for (cidr=0;cidr<= STRIDE -1;cidr ++ ){ for (bit=0;bit< (1<<cidr);bit++) { pos = count_inl_bitmap(bit,cidr); if (test_bitmap(node->internal, pos)) { nhi = (struct next_hop_info**)node->child_ptr - count_ones(node->internal, pos) - 1; if (destroy_nhi && *nhi != NULL) { destroy_nhi(*nhi); } *nhi = NULL; } } } for (stride = 0; stride < (1<<STRIDE); stride ++ ){ pos = count_enl_bitmap(stride); if (test_bitmap(node->external, pos)) { next = (struct mb_node_v6 *)node->child_ptr + count_ones(node->external, pos); #ifndef USE_MM destroy_subtrie(next, destroy_nhi); #else destroy_subtrie(next, destroy_nhi, m, depth + 1, use_mm); #endif } } cnt_rules = count_children(node->internal); first = POINT(node->child_ptr) - UP_RULE(cnt_rules); #ifdef DEBUG_MEMORY_FREE int cnt = count_children(node->internal); mem_destroy += UP_RULE(cnt) * NODE_SIZE; cnt = count_children(node->external); mem_destroy += UP_CHILD(cnt) * NODE_SIZE; #endif node->internal = 0; node->external = 0; node->child_ptr = NULL; #ifdef USE_MM //printf("not supported\n"); int cnt_children = count_children(node->external); free_node(m, first, UP_RULE(cnt_rules) + UP_CHILD(cnt_children), depth, use_mm); #else free(first); #endif }
// Repeatedly split the first-root page's children until it holds at
// least `desired_count` pointers (capped at kMaxIntermediatePointers).
// Grows the root first if it has foster children, then loops:
// lock owner + page, count children, and double via
// fatify_first_root_double until the target (or a fixpoint) is reached.
ErrorStack MasstreeStoragePimpl::fatify_first_root(
  thread::Thread* context,
  uint32_t desired_count) {
  LOG(INFO) << "Masstree-" << get_name() << " being fatified for " << desired_count;

  if (desired_count > kMaxIntermediatePointers) {
    LOG(INFO) << "desired_count too large. adjusted to the max";
    desired_count = kMaxIntermediatePointers;
  }

  // Check if the volatile page is moved. If so, grow it.
  while (true) {
    MasstreeIntermediatePage* root;
    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    if (root->has_foster_child()) {
      // oh, the root page needs to grow
      LOG(INFO) << "oh, the root page needs to grow";
      WRAP_ERROR_CODE(grow_root(
        context,
        &get_first_root_pointer(),
        &get_first_root_owner(),
        &root));
      // then retry
    } else {
      break;
    }
  }

  while (true) {
    // lock the first root.
    xct::McsLockScope owner_scope(context, &get_first_root_owner());
    LOG(INFO) << "Locked the root page owner address.";
    MasstreeIntermediatePage* root;
    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    PageVersionLockScope scope(context, root->get_version_address());
    LOG(INFO) << "Locked the root page itself.";
    if (root->has_foster_child()) {
      // A concurrent transaction moved the root between our grow loop and
      // taking the locks; give up rather than race.
      LOG(WARNING) << "Mm, I thought I grew the root, but concurrent xct again moved it. "
        << " Gave up fatifying. Should be super-rare.";
      return kRetOk;
    }

    ASSERT_ND(root->is_locked());
    ASSERT_ND(!root->is_moved());
    uint32_t current_count = count_children(root);
    LOG(INFO) << "Masstree-" << get_name() << " currently has " << current_count << " children";
    // Stop at half the max: each doubling round can at most double the
    // count, so going past half could overflow the page.
    if (current_count >= desired_count || current_count >= (kMaxIntermediatePointers / 2U)) {
      LOG(INFO) << "Already enough fat. Done";
      break;
    }

    LOG(INFO) << "Splitting...";
    CHECK_ERROR(fatify_first_root_double(context));

    WRAP_ERROR_CODE(get_first_root(context, true, &root));
    uint32_t new_count = count_children(root);
    if (new_count == current_count) {
      // No child could be split any further; doubling has hit a fixpoint.
      LOG(INFO) << "Seems like we can't split any more.";
      break;
    }
  }

  return kRetOk;
}
// One doubling round of fatify_first_root: split every direct child of
// the (locked) first root, build a brand-new root page holding all the
// resulting halves, atomically swap the first-root pointer to it, and
// retire the old root plus every child that actually split.
ErrorStack MasstreeStoragePimpl::fatify_first_root_double(thread::Thread* context) {
  MasstreeIntermediatePage* root;
  WRAP_ERROR_CODE(get_first_root(context, true, &root));
  ASSERT_ND(root->is_locked());
  ASSERT_ND(!root->is_moved());

  // assure that all children have volatile version
  for (MasstreeIntermediatePointerIterator it(root); it.is_valid(); it.next()) {
    if (it.get_pointer().volatile_pointer_.is_null()) {
      MasstreePage* child;
      WRAP_ERROR_CODE(follow_page(
        context,
        true,
        const_cast<DualPagePointer*>(&it.get_pointer()),
        &child));
    }
    ASSERT_ND(!it.get_pointer().volatile_pointer_.is_null());
  }

  std::vector<Child> original_children = list_children(root);
  // Doubling must fit in one intermediate page.
  ASSERT_ND(original_children.size() * 2U <= kMaxIntermediatePointers);
  std::vector<Child> new_children;
  for (const Child& child : original_children) {
    CHECK_ERROR(split_a_child(context, root, child, &new_children));
  }
  ASSERT_ND(new_children.size() >= original_children.size());

  memory::NumaCoreMemory* memory = context->get_thread_memory();
  memory::PagePoolOffset new_offset = memory->grab_free_volatile_page();
  if (new_offset == 0) {
    return ERROR_STACK(kErrorCodeMemoryNoFreePages);
  }
  // from now on no failure (we grabbed a free page).
  VolatilePagePointer new_pointer = combine_volatile_page_pointer(
    context->get_numa_node(),
    kVolatilePointerFlagSwappable,  // pointer to root page might be swapped!
    get_first_root_pointer().volatile_pointer_.components.mod_count + 1,
    new_offset);
  MasstreeIntermediatePage* new_root
    = context->resolve_newpage_cast<MasstreeIntermediatePage>(new_pointer);
  new_root->initialize_volatile_page(
    get_id(),
    new_pointer,
    0,
    root->get_btree_level(),  // same as current root. this is not grow_root
    kInfimumSlice,
    kSupremumSlice);

  // no concurrent access to the new page, but just for the sake of assertion in the func.
  PageVersionLockScope new_scope(context, new_root->get_version_address());
  new_root->split_foster_migrate_records_new_first_root(&new_children);
  ASSERT_ND(count_children(new_root) == new_children.size());
  verify_new_root(context, new_root, new_children);

  // set the new first-root pointer.
  assorted::memory_fence_release();
  get_first_root_pointer().volatile_pointer_.word = new_pointer.word;
  // first-root snapshot pointer is unchanged.

  // old root page and the direct children are now retired
  assorted::memory_fence_acq_rel();
  root->set_moved();  // not quite moved, but assertions assume that.
  root->set_retired();
  context->collect_retired_volatile_page(
    construct_volatile_page_pointer(root->header().page_id_));
  for (const Child& child : original_children) {
    MasstreePage* original_page = context->resolve_cast<MasstreePage>(child.pointer_);
    if (original_page->is_moved()) {
      PageVersionLockScope scope(context, original_page->get_version_address());
      original_page->set_retired();
      context->collect_retired_volatile_page(child.pointer_);
    } else {
      // This means, the page had too small records to split. We must keep it.
    }
  }
  assorted::memory_fence_acq_rel();

  LOG(INFO) << "Split done. " << original_children.size() << " -> " << new_children.size();
  return kRetOk;
}
// Split one direct child of the root (if it can be split) and append the
// resulting page(s) to `out`.  A page that cannot be split further (fewer
// than 2 records/pointers) is appended unchanged; otherwise the two
// foster halves are appended with the foster fence as their separator.
ErrorStack split_a_child(
  thread::Thread* context,
  MasstreeIntermediatePage* root,
  Child original,
  std::vector<Child>* out) {
  ASSERT_ND(!original.pointer_.is_null());
  MasstreeIntermediatePage::MiniPage& minipage = root->get_minipage(original.index_);
  ASSERT_ND(
    minipage.pointers_[original.index_mini_].volatile_pointer_.is_equivalent(original.pointer_));
  MasstreePage* original_page = context->resolve_cast<MasstreePage>(original.pointer_);
  ASSERT_ND(original_page->get_low_fence() == original.low_);
  ASSERT_ND(original_page->get_high_fence() == original.high_);

  // lock it first.
  PageVersionLockScope scope(context, original_page->get_version_address());
  ASSERT_ND(original_page->is_locked());
  // if it already has a foster child, nothing to do.
  if (!original_page->is_moved()) {
    if (original_page->is_border()) {
      MasstreeBorderPage* casted = reinterpret_cast<MasstreeBorderPage*>(original_page);
      if (casted->get_key_count() < 2U) {
        // Then, no split possible.
        LOG(INFO) << "This border page can't be split anymore";
        out->emplace_back(original);
        return kRetOk;
      }
      // trigger doesn't matter. just make sure it doesn't cause no-record-split. so, use low_fence.
      // also, specify disable_nrs
      KeySlice trigger = casted->get_low_fence();
      MasstreeBorderPage* after = casted;
      xct::McsLockScope after_lock;
      casted->split_foster(context, trigger, true, &after, &after_lock);
      ASSERT_ND(after->is_locked());
      ASSERT_ND(after_lock.is_locked());
      ASSERT_ND(casted->is_moved());
    } else {
      MasstreeIntermediatePage* casted
        = reinterpret_cast<MasstreeIntermediatePage*>(original_page);
      uint32_t pointers = count_children(casted);
      if (pointers < 2U) {
        LOG(INFO) << "This intermediate page can't be split anymore";
        out->emplace_back(original);
        return kRetOk;
      }
      WRAP_ERROR_CODE(casted->split_foster_no_adopt(context));
    }
  } else {
    // Someone already split this page; we only need to pick up the halves.
    LOG(INFO) << "lucky, already split. just adopt";
  }

  // At this point the page is moved: hand back both foster halves, whose
  // fences must tile [original.low_, original.high_) exactly at `middle`.
  ASSERT_ND(original_page->is_moved());
  VolatilePagePointer minor_pointer = original_page->get_foster_minor();
  VolatilePagePointer major_pointer = original_page->get_foster_major();
  ASSERT_ND(!minor_pointer.is_null());
  ASSERT_ND(!major_pointer.is_null());
  MasstreePage* minor = context->resolve_cast<MasstreePage>(minor_pointer);
  MasstreePage* major = context->resolve_cast<MasstreePage>(major_pointer);
  KeySlice middle = original_page->get_foster_fence();
  ASSERT_ND(minor->get_low_fence() == original.low_);
  ASSERT_ND(minor->get_high_fence() == middle);
  ASSERT_ND(major->get_low_fence() == middle);
  ASSERT_ND(major->get_high_fence() == original.high_);
  Child minor_out = {minor_pointer, original.low_, middle, 0, 0};
  out->emplace_back(minor_out);
  Child major_out = {major_pointer, middle, original.high_, 0, 0};
  out->emplace_back(major_out);
  return kRetOk;
}
void xml2json_traverse_node(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator) { //cout << "this: " << xmlnode->type() << " name: " << xmlnode->name() << " value: " << xmlnode->value() << endl; rapidjson::Value jsvalue_chd; jsvalue.SetObject(); jsvalue_chd.SetObject(); rapidxml::xml_node<> *xmlnode_chd; // classified discussion: if((xmlnode->type() == rapidxml::node_data || xmlnode->type() == rapidxml::node_cdata) && xmlnode->value()) { // case: pure_text jsvalue.SetString(xmlnode->value(), allocator); // then addmember("#text" , jsvalue, allocator) } else if(xmlnode->type() == rapidxml::node_element) { if(xmlnode->first_attribute()) { if(xmlnode->first_node() && xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1) { // case: <e attr="xxx">text</e> rapidjson::Value jn, jv; jn.SetString(xml2json_text_additional_name, allocator); jv.SetString(xmlnode->first_node()->value(), allocator); jsvalue.AddMember(jn, jv, allocator); xml2json_add_attributes(xmlnode, jsvalue, allocator); return; } else { // case: <e attr="xxx">...</e> xml2json_add_attributes(xmlnode, jsvalue, allocator); } } else { if(!xmlnode->first_node()) { // case: <e /> jsvalue.SetNull(); return; } else if(xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1) { // case: <e>text</e> if (xml2json_numeric_support == false) { jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator); } else { bool hasDecimal; if (xml2json_has_digits_only(xmlnode->first_node()->value(), &hasDecimal) == false) { jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator); } else { char *bgnptr = xmlnode->first_node()->value(); char *endptr = bgnptr; if (hasDecimal) { double value = std::strtod(bgnptr, &endptr); if (bgnptr != endptr) jsvalue.SetDouble(value); else jsvalue.SetString(rapidjson::StringRef(bgnptr), allocator); } else { long int value = 
std::strtol(bgnptr, &endptr, 0); if (bgnptr != endptr) jsvalue.SetInt(value); else jsvalue.SetString(rapidjson::StringRef(bgnptr), allocator); } } } return; } } if(xmlnode->first_node()) { // case: complex else... std::map<std::string, int> name_count; for(xmlnode_chd = xmlnode->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling()) { std::string current_name; const char *name_ptr = NULL; rapidjson::Value jn, jv; if(xmlnode_chd->type() == rapidxml::node_data || xmlnode_chd->type() == rapidxml::node_cdata) { current_name = xml2json_text_additional_name; name_count[current_name]++; jv.SetString(xml2json_text_additional_name, allocator); name_ptr = jv.GetString(); } else if(xmlnode_chd->type() == rapidxml::node_element) { current_name = xmlnode_chd->name(); name_count[current_name]++; name_ptr = xmlnode_chd->name(); } xml2json_traverse_node(xmlnode_chd, jsvalue_chd, allocator); if(name_count[current_name] > 1 && name_ptr) xml2json_to_array_form(name_ptr, jsvalue, jsvalue_chd, allocator); else { jn.SetString(name_ptr, allocator); jsvalue.AddMember(jn, jsvalue_chd, allocator); } } } } else { std::cerr << "err data!!" << std::endl; } }
void traverse_node(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator) { //cout << "this: " << xmlnode->type() << " name: " << xmlnode->name() << " value: " << xmlnode->value() << endl; rapidjson::Value jsvalue_chd; jsvalue.SetObject(); jsvalue_chd.SetObject(); rapidxml::xml_node<> *xmlnode_chd; // classified discussion: if ((xmlnode->type() == rapidxml::node_data || xmlnode->type() == rapidxml::node_cdata) && xmlnode->value()) { // case: pure_text jsvalue.SetString(xmlnode->value(), allocator); // then addmember("#text" , jsvalue, allocator) } else if (xmlnode->type() == rapidxml::node_element) { if (xmlnode->first_attribute()) { if (xmlnode->first_node() && xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1) { // case: <e attr="xxx">text</e> rapidjson::Value jn, jv; jn.SetString("#text", allocator); jv.SetString(xmlnode->first_node()->value(), allocator); jsvalue.AddMember(jn, jv, allocator); add_attributes(xmlnode, jsvalue, allocator); return; } else { // case: <e attr="xxx">...</e> add_attributes(xmlnode, jsvalue, allocator); } } else { if (!xmlnode->first_node()) { // case: <e /> jsvalue.SetNull(); return; } else if (xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1) { // case: <e>text</e> jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator); return; } } if (xmlnode->first_node()) { // case: complex else... 
std::map<std::string, int> name_count; for (xmlnode_chd = xmlnode->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling()) { std::string current_name; const char *name_ptr = NULL; rapidjson::Value jn, jv; if (xmlnode_chd->type() == rapidxml::node_data || xmlnode_chd->type() == rapidxml::node_cdata) { current_name = "#text"; name_count[current_name]++; jv.SetString("#text", allocator); name_ptr = jv.GetString(); } else if (xmlnode_chd->type() == rapidxml::node_element) { current_name = xmlnode_chd->name(); name_count[current_name]++; name_ptr = xmlnode_chd->name(); } traverse_node(xmlnode_chd, jsvalue_chd, allocator); if (name_count[current_name] > 1 && name_ptr) to_array_form(name_ptr, jsvalue, jsvalue_chd, allocator); else { jn.SetString(name_ptr, allocator); jsvalue.AddMember(jn, jsvalue_chd, allocator); } } } } else { std::cerr << "err data!!" << std::endl; } }