/**
 * Scan a region of memory and mark any items in the used list appropriately.
 * Both arguments should be word aligned.
 */
void mark_from_region(void *start_ptr, void *end_ptr)
{
    header_t *block_ptr;
    void     *current_ptr;

    // Iterate word-wise through the memory
    for(current_ptr = start_ptr; current_ptr < end_ptr; current_ptr += sizeof(void*)) {
        ptr_int value;
        value = *(ptr_int*)current_ptr;

        // Iterate through the used memory blocks
        block_ptr = usedptr;
        do {
            // If the pointer value points somewhere into this allocated block
            if((ptr_int)start_of_block(block_ptr) <= value &&
               (ptr_int)end_of_block(block_ptr) > value) {
                tag(block_ptr);
                break;
            }
            block_ptr = next_block(block_ptr);
        } while(block_ptr != NULL);
    }
}
/**
 * Scan the used blocks on the heap for active pointers and mark the
 * blocks they point to.
 */
void mark_from_heap(void)
{
    header_t *current_ptr;

    for(current_ptr = usedptr; current_ptr != NULL; current_ptr = next_block(current_ptr)) {
        mark_from_region(start_of_block(current_ptr), end_of_block(current_ptr));
    }
}
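These two routines lean on block helpers that are not part of the excerpt. A minimal sketch of what they might look like, assuming each allocation is prefixed by a `header_t` carrying its size, a mark bit, and a link to the next used block (the field names below are assumptions, not the original layout):

```c
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t ptr_int;

typedef struct header {
    size_t         size;   /* payload size in bytes           */
    unsigned       marked; /* set by tag(), cleared before GC */
    struct header *next;   /* next block in the used list     */
} header_t;

static header_t *usedptr;  /* head of the used-block list */

/* Payload starts immediately after the header. */
static void *start_of_block(header_t *h) { return (void*)(h + 1); }

/* One past the last payload byte. */
static void *end_of_block(header_t *h)   { return (char*)(h + 1) + h->size; }

static header_t *next_block(header_t *h) { return h->next; }

static void tag(header_t *h)             { h->marked = 1; }
```

Under this reading, a conservative collector would seed the mark phase by calling `mark_from_region` over the roots (stack, registers, globals) and then let `mark_from_heap` propagate marks through pointers stored inside already-used blocks.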
static int
endoftextblock(Line *t, int toplevelblock, DWORD flags)
{
    int z;

    if ( end_of_block(t) || isquote(t) )
        return 1;

    /* HORRIBLE STANDARDS KLUDGES:
     * 1. non-toplevel paragraphs absorb adjacent code blocks
     * 2. Toplevel paragraphs absorb adjacent list items,
     *    but sublevel blocks behave properly.
     * (What this means is that we only need to check for code
     *  blocks at toplevel, and only check for list items at
     *  nested levels.)
     */
    return toplevelblock ? 0 : islist(t,&z,flags,&z);
}
int ObSSTableBlockScanner::get_next_row(const ObRowkey* &row_key, const ObRow *&row_value)
{
    int ret = OB_SUCCESS;

    if (end_of_block()) {
        ret = OB_BEYOND_THE_RANGE;
    } else if (OB_SUCCESS != (ret = store_and_advance_row())) {
        TBSYS_LOG(ERROR, "store_and_advance_row error, ret=%d", ret);
    } else {
        row_key = &current_rowkey_;
        row_value = &current_row_;
    }

    return ret;
}
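A typical consumption loop for this scanner might look like the sketch below. Only `get_next_row()` and the two return codes come from the snippet above; the driver function itself is assumed context, not OceanBase API:

```cpp
// Hypothetical driver loop over one block.
void dump_block_rows(ObSSTableBlockScanner &scanner)
{
    const ObRowkey *key = NULL;
    const ObRow *row = NULL;
    int ret = OB_SUCCESS;

    // OB_BEYOND_THE_RANGE is the normal end-of-block signal, not an error.
    while (OB_SUCCESS == (ret = scanner.get_next_row(key, row))) {
        // ... consume key/row here. Both point at scanner-owned storage
        // (current_rowkey_/current_row_), so they are only valid until
        // the next call.
    }
    if (OB_BEYOND_THE_RANGE != ret) {
        TBSYS_LOG(ERROR, "scan failed, ret=%d", ret);
    }
}
```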
static int
islist(Line *t, int *clip, DWORD flags, int *list_type)
{
    int i, j;
    char *q;

    if ( end_of_block(t) )
        return 0;

    if ( !(flags & (MKD_NODLIST|MKD_STRICT)) && isdefinition(t,clip,list_type) )
        return DL;

    if ( strchr("*-+", T(t->text)[t->dle]) && isspace(T(t->text)[t->dle+1]) ) {
        i = nextnonblank(t, t->dle+1);
        *clip = (i > 4) ? 4 : i;
        *list_type = UL;
        return AL;
    }

    if ( (j = nextblank(t,t->dle)) > t->dle ) {
        if ( T(t->text)[j-1] == '.' ) {
            if ( !(flags & (MKD_NOALPHALIST|MKD_STRICT))
                 && (j == t->dle + 2)
                 && isalpha(T(t->text)[t->dle]) ) {
                j = nextnonblank(t,j);
                *clip = (j > 4) ? 4 : j;
                *list_type = AL;
                return AL;
            }

            strtoul(T(t->text)+t->dle, &q, 10);
            if ( (q > T(t->text)+t->dle) && (q == T(t->text) + (j-1)) ) {
                j = nextnonblank(t,j);
                /* *clip = j; */
                *clip = (j > 4) ? 4 : j;
                *list_type = OL;
                return AL;
            }
        }
    }
    return 0;
}
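The ordered-list branch is the subtle part: a marker only qualifies if `strtoul` consumes every character up to the `.` that precedes the first blank. A standalone restatement of that check, using a plain C string instead of discount's `Line`/`T()` accessors (so this helper is an illustration, not the library's code):

```c
#include <ctype.h>
#include <stdlib.h>

/* Return 1 if s begins with digits immediately followed by '.' and a blank,
 * e.g. "12. item" but not "1a. item". Mirrors the strtoul test in islist():
 * the numeric parse must stop exactly at the dot. */
static int is_ordered_marker(const char *s)
{
    char *q;
    (void)strtoul(s, &q, 10);
    return (q > s) && (*q == '.') && isspace((unsigned char)q[1]);
}
```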
static Line *
is_extra_dt(Line *t, int *clip, DWORD flags)
{
    if ( flags & MKD_DLEXTRA
         && t && t->next
         && S(t->text)
         && T(t->text)[0] != '='
         && T(t->text)[S(t->text)-1] != '=' ) {
        Line *x;

        if ( iscode(t) || end_of_block(t, flags) )
            return 0;

        if ( (x = skipempty(t->next)) && is_extra_dd(x) ) {
            *clip = x->dle+2;
            return t;
        }

        if ( (x = is_extra_dt(t->next, clip, flags)) )
            return x;
    }
    return 0;
}
static Line *
is_extra_dt(Line *t, int *clip)
{
#if USE_EXTRA_DL
    if ( t && t->next
         && S(t->text)
         && T(t->text)[0] != '='
         && T(t->text)[S(t->text)-1] != '=' ) {
        Line *x;

        if ( iscode(t) || end_of_block(t) )
            return 0;

        if ( (x = skipempty(t->next)) && is_extra_dd(x) ) {
            *clip = x->dle+2;
            return t;
        }

        if ( (x = is_extra_dt(t->next, clip)) )
            return x;
    }
#endif
    return 0;
}
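Both variants (the newer one gated by the `MKD_DLEXTRA` run-time flag, the older one by the `USE_EXTRA_DL` compile-time switch) delegate the actual `: definition` test to `is_extra_dd()`, which is not shown in this excerpt. Given that `is_extra_dt()` sets `*clip = x->dle+2`, a guess at its shape, relying on discount's `Line`/`T()` accessors and labeled as an assumption rather than the library's actual code:

```c
/* Assumed sketch: a definition body line looks like ": definition text",
 * i.e. a colon at the line's indentation point (t->dle) followed by a
 * blank, which is why the caller clips dle+2 columns off the body. */
static int is_extra_dd(Line *t)
{
    return t && T(t->text)[t->dle] == ':' && isspace(T(t->text)[t->dle+1]);
}
```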
void MethodTransform::build_cfg() {
  // Find the block boundaries.
  std::unordered_map<MethodItemEntry*, std::vector<Block*>> branch_to_targets;
  std::vector<std::pair<DexTryItem*, Block*>> try_ends;
  std::unordered_map<DexTryItem*, std::vector<Block*>> try_catches;
  size_t id = 0;
  bool in_try = false;
  m_blocks.emplace_back(new Block(id++));
  m_blocks.back()->m_begin = m_fmethod->begin();

  // The first block can be a branch target.
  auto begin = m_fmethod->begin();
  if (begin->type == MFLOW_TARGET) {
    branch_to_targets[begin->target->src].push_back(m_blocks.back());
  }
  for (auto it = m_fmethod->begin(); it != m_fmethod->end(); ++it) {
    if (it->type == MFLOW_TRY) {
      if (it->tentry->type == TRY_START) {
        in_try = true;
      } else if (it->tentry->type == TRY_END) {
        in_try = false;
      }
    }
    if (!end_of_block(m_fmethod, it, in_try)) {
      continue;
    }
    // End the current block.
    auto next = std::next(it);
    if (next == m_fmethod->end()) {
      m_blocks.back()->m_end = next;
      continue;
    }
    // Start a new block at the next MethodItem.
    auto next_block = new Block(id++);
    if (next->type == MFLOW_OPCODE) {
      insert_fallthrough(m_fmethod, &*next);
      next = std::next(it);
    }
    m_blocks.back()->m_end = next;
    next_block->m_begin = next;
    m_blocks.emplace_back(next_block);
    // Record branch targets to add edges in the next pass.
    if (next->type == MFLOW_TARGET) {
      branch_to_targets[next->target->src].push_back(next_block);
      continue;
    }
    // Record try/catch blocks to add edges in the next pass.
    if (next->type == MFLOW_TRY) {
      if (next->tentry->type == TRY_END) {
        try_ends.emplace_back(next->tentry->tentry, next_block);
      } else if (next->tentry->type == TRY_CATCH) {
        try_catches[next->tentry->tentry].push_back(next_block);
      }
    }
  }

  // Link the blocks together with edges.
  for (auto it = m_blocks.begin(); it != m_blocks.end(); ++it) {
    // Set an outgoing edge if the last MIE falls through.
    auto lastmei = (*it)->rbegin();
    bool fallthrough = true;
    if (lastmei->type == MFLOW_OPCODE) {
      auto lastop = lastmei->insn->opcode();
      if (is_goto(lastop) || is_conditional_branch(lastop) ||
          is_multi_branch(lastop)) {
        fallthrough = !is_goto(lastop);
        auto const& targets = branch_to_targets[&*lastmei];
        for (auto target : targets) {
          (*it)->m_succs.push_back(target);
          target->m_preds.push_back(*it);
        }
      } else if (is_return(lastop) || lastop == OPCODE_THROW) {
        fallthrough = false;
      }
    }
    if (fallthrough && std::next(it) != m_blocks.end()) {
      Block* next = *std::next(it);
      (*it)->m_succs.push_back(next);
      next->m_preds.push_back(*it);
    }
  }

  /*
   * Now add the catch edges. Every block inside a try-start/try-end region
   * gets an edge to every catch block. This simplifies dataflow analysis
   * since you can always get the exception state by looking at successors,
   * without any additional analysis.
   *
   * NB: This algorithm assumes that a try-start/try-end region will consist
   * of sequentially-numbered blocks, which is guaranteed because catch
   * regions are contiguous in the bytecode, and we generate blocks in
   * bytecode order.
   */
  for (auto tep : try_ends) {
    auto tryitem = tep.first;
    auto tryendblock = tep.second;
    size_t bid = tryendblock->id();
    always_assert(bid > 0);
    --bid;
    while (true) {
      auto block = m_blocks[bid];
      if (ends_with_may_throw(block)) {
        auto& catches = try_catches[tryitem];
        for (auto catchblock : catches) {
          block->m_succs.push_back(catchblock);
          catchblock->m_preds.push_back(block);
        }
      }
      auto begin = block->begin();
      if (begin->type == MFLOW_TRY) {
        auto tentry = begin->tentry;
        if (tentry->type == TRY_START && tentry->tentry == tryitem) {
          break;
        }
      }
      always_assert_log(bid > 0, "No beginning of try region found");
      --bid;
    }
  }
  TRACE(CFG, 5, "%s\n", show(m_method).c_str());
  TRACE(CFG, 5, "%s", show(m_blocks).c_str());
}
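The `end_of_block` predicate that drives the first pass is not part of this excerpt. A plausible sketch, consistent with how the loop uses it: a block must end before any branch target or try boundary, after any terminator, and, while inside a try region, after any opcode that can throw (so catch edges can be attached per block). The `FatMethod` type name and the `may_throw()` helper below are assumptions; the `MFLOW_*` tags and the `is_goto`/`is_conditional_branch`/`is_multi_branch`/`is_return` predicates appear in the code above:

```cpp
// Sketch only: infers the contract from how build_cfg() calls it.
static bool end_of_block(const FatMethod* fm,
                         FatMethod::iterator it,
                         bool in_try) {
  auto next = std::next(it);
  // The list ends here, or the next item must start a new block.
  if (next == fm->end() || next->type == MFLOW_TARGET ||
      next->type == MFLOW_TRY) {
    return true;
  }
  if (it->type != MFLOW_OPCODE) {
    return false;
  }
  auto op = it->insn->opcode();
  // Terminators always end a block; inside a try region, anything that can
  // throw must end one too.
  return is_goto(op) || is_conditional_branch(op) || is_multi_branch(op) ||
         is_return(op) || op == OPCODE_THROW || (in_try && may_throw(op));
}
```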