struct token *expand(struct token *original) { const struct token *list; struct token *res; /* Do nothing if there is nothing to expand. */ if (!needs_expansion(original)) return original; list = original; res = calloc(1, sizeof(*res)); res[0] = token_end; while (list->token != END) { const struct macro *def = definition(*list); struct token **args; /* Only expand function-like macros if they appear as function * invocations, beginning with an open paranthesis. */ if (def && !is_macro_expanded(def) && (def->type != FUNCTION_LIKE || peek_next(list + 1) == '(')) { args = read_args(list + 1, &list, def); res = concat(res, expand_macro(def, args)); } else { res = append(res, *list++); } } free(original); return res; }
// locate an exiting record that contains specified address, or // the record, where the record with specified address, should // be inserted virtual MemPointer* locate(address addr) { VMMemRegion* cur = (VMMemRegion*)current(); VMMemRegion* next_p; while (cur != NULL) { if (cur->base() > addr) { return cur; } else { // find nearest existing range that has base address <= addr next_p = (VMMemRegion*)peek_next(); if (next_p != NULL && next_p->base() <= addr) { cur = (VMMemRegion*)next(); continue; } } if (cur->is_reserve_record() && cur->base() <= addr && (cur->base() + cur->size() > addr)) { return cur; } else if (cur->is_commit_record() && cur->base() <= addr && (cur->base() + cur->committed_size() > addr)) { return cur; } cur = (VMMemRegion*)next(); } return NULL; }
// Remove (or trim, or split) the current reserved region to reflect the
// released range described by 'rec'.  Precondition: the iterator's
// current record is a reserved region containing rec's range.
// Returns false only when inserting the split-off remainder fails.
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec), "Sanity check");
  if (rec->is_same_region(cur)) {
    // The whole reserved region is released: drop the record entirely.
#ifdef ASSERT
    VMMemRegion* next_region = (VMMemRegion*)peek_next();
    // There should not be any committed memory left inside this
    // reserved region when it is released.
    assert(next_region == NULL || !next_region->is_committed_region(), "Sanity check");
#endif
    remove();
  } else if (rec->addr() == cur->addr() ||
             rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // The released range sits at either end of this region: trim it off.
    cur->exclude_region(rec->addr(), rec->size());
    assert(check_reserved_region(), "Integrity check");
  } else {
    // The released range is strictly in the middle: split the reserved
    // region.  First shrink 'cur' down to the part below the released
    // range (exclude everything from rec->addr() to the old end) ...
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    // ... then insert a new record covering the part above the
    // released range.
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      // Call-site tracking is on: carry the original pc into the
      // extended record type.
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
                             ((VMMemRegionEx*)cur)->pc());
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    }
  }
  return true;
}
/**
 * insn_get_prefixes - scan x86 instruction prefix bytes
 * @insn:	&struct insn containing instruction
 *
 * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
 * to point to the (first) opcode.  No effect if @insn->prefixes.got
 * is already set.  Also decodes any REX prefix (64-bit mode only) and
 * VEX prefix, marking their 'got' flags.
 */
void insn_get_prefixes(struct insn *insn)
{
	struct insn_field *prefixes = &insn->prefixes;
	insn_attr_t attr;
	insn_byte_t b, lb;
	int i, nb;

	if (prefixes->got)
		return;

	/* nb: count of distinct legacy prefixes seen; lb: last prefix byte. */
	nb = 0;
	lb = 0;
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	while (inat_is_legacy_prefix(attr)) {
		/* Skip if same prefix */
		for (i = 0; i < nb; i++)
			if (prefixes->bytes[i] == b)
				goto found;
		if (nb == 4)
			/* Invalid instruction */
			break;
		prefixes->bytes[nb++] = b;
		if (inat_is_address_size_prefix(attr)) {
			/* address size switches 2/4 or 4/8 */
			if (insn->x86_64)
				insn->addr_bytes ^= 12;
			else
				insn->addr_bytes ^= 6;
		} else if (inat_is_operand_size_prefix(attr)) {
			/* operand size switches 2/4 */
			insn->opnd_bytes ^= 6;
		}
found:
		/* Repeated prefixes still consume a byte of the stream. */
		prefixes->nbytes++;
		insn->next_byte++;
		lb = b;
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
	}
	/* Set the last prefix: keep the most recent prefix in bytes[3]. */
	if (lb && lb != insn->prefixes.bytes[3]) {
		if (unlikely(insn->prefixes.bytes[3])) {
			/* Swap the last prefix */
			b = insn->prefixes.bytes[3];
			for (i = 0; i < nb; i++)
				if (prefixes->bytes[i] == lb)
					prefixes->bytes[i] = b;
		}
		insn->prefixes.bytes[3] = lb;
	}

	/* Decode REX prefix (only exists in 64-bit mode) */
	if (insn->x86_64) {
		b = peek_next(insn_byte_t, insn);
		attr = inat_get_opcode_attribute(b);
		if (inat_is_rex_prefix(attr)) {
			insn->rex_prefix.value = b;
			insn->rex_prefix.nbytes = 1;
			insn->next_byte++;
			if (X86_REX_W(b))
				/* REX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		}
	}
	insn->rex_prefix.got = 1;

	/* Decode VEX prefix */
	b = peek_next(insn_byte_t, insn);
	attr = inat_get_opcode_attribute(b);
	if (inat_is_vex_prefix(attr)) {
		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
		if (!insn->x86_64) {
			/*
			 * In 32-bits mode, if the [7:6] bits (mod bits of
			 * ModRM) on the second byte are not 11b, it is
			 * LDS or LES.
			 */
			if (X86_MODRM_MOD(b2) != 3)
				goto vex_end;
		}
		insn->vex_prefix.bytes[0] = b;
		insn->vex_prefix.bytes[1] = b2;
		if (inat_is_vex3_prefix(attr)) {
			/* 3-byte VEX: read and record the third byte too. */
			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
			insn->vex_prefix.bytes[2] = b2;
			insn->vex_prefix.nbytes = 3;
			insn->next_byte += 3;
			if (insn->x86_64 && X86_VEX_W(b2))
				/* VEX.W overrides opnd_size */
				insn->opnd_bytes = 8;
		} else {
			insn->vex_prefix.nbytes = 2;
			insn->next_byte += 2;
		}
	}
vex_end:
	insn->vex_prefix.got = 1;

	prefixes->got = 1;

	return;
}