/*
 * Return the index-th "real" machine instruction of a basic block.
 * Pseudo-operations (mo_null, mo_lab, mo_loc) are transparent: they do
 * not count toward the index.  Asserts that node is a valid block and
 * that index is in range, and that a real instruction was found.
 */
machine_instr *instr_access(cfg_node *node, int index) {
    assert(NULL != node);
    assert(index >= 0);
    assert(node->is_block());

    int count = cfg_node_static_instr_size(node);
    assert(index < count);

    machine_instr *found = NULL;
    int remaining = index;          /* real instructions still to skip */
    cfg_node_instr_iter iter(node);
    while (!iter.is_empty()) {
        tree_instr *ti = iter.step();
        machine_instr *mi = (machine_instr *) ti->instr();
        int op = (int) mi->opcode();
        /* pseudo-ops are invisible to the caller's indexing */
        if (op == mo_null || op == mo_lab || op == mo_loc)
            continue;
        if (remaining == 0) {
            found = mi;
            break;
        }
        --remaining;
    }
    assert(NULL != found);
    return found;
}
// Redirect this breakpoint to a new target address.  The target is
// encoded either as a scaled offset (internal targets) or as an index
// into the runtime address table, and is written into the data area
// immediately after the live_bits word.
void breakpoint_Relocation::set_target(address x) {
  assert(settable(), "must be settable");
  // FIX 1: parenthesize so the (jint) cast applies to the selected
  // value, not to internal() used as the ?: condition.
  jint target_bits = (jint)(internal() ? scaled_offset(x)
                                       : runtime_address_to_index(x));
  short* p = &live_bits() + 1;
  // FIX 2: add_long returns the advanced write pointer; it was being
  // discarded, so the assert below compared the stale pointer and the
  // "new target must fit" size check never actually checked anything.
  p = add_long(p, target_bits);
  assert(p == instrs(), "new target must fit");
  _target = x;
}
void breakpoint_Relocation::set_active(bool b) { assert(!is_copy(), "cannot change breakpoint state when working on a copy"); assert(!b || enabled(), "cannot activate a disabled breakpoint"); if (active() == b) return; // %%% should probably seize a lock here (might not be the right lock) //MutexLockerEx ml_patch(Patching_lock, true); //if (active() == b) return; // recheck state after locking if (b) { set_bits(bits() | active_state); if (instrlen() == 0) fatal("breakpoints in original code must be undoable"); pd_swap_in_breakpoint (addr(), instrs(), instrlen()); } else { set_bits(bits() & ~active_state); pd_swap_out_breakpoint(addr(), instrs(), instrlen()); } }