static CA_BOOL mark_blocks_referenced_by_globals_locals(struct inuse_block* blocks, unsigned long total_blocks, unsigned int* qv_bitmap) { unsigned int seg_index; size_t ptr_sz = g_ptr_bit >> 3; for (seg_index = 0; seg_index < g_segment_count; seg_index++) { struct ca_segment* segment = &g_segments[seg_index]; // This search may take long, bail out if user is impatient if (user_request_break()) { CA_PRINT("Abort searching\n"); break; } if (segment->m_fsize == 0) continue; // Only local/global variables are checked if (segment->m_type == ENUM_STACK || segment->m_type == ENUM_MODULE_DATA || segment->m_type == ENUM_MODULE_TEXT) { address_t start, next, end; start = segment->m_vaddr; end = start + segment->m_fsize; // ignore stack memory below stack pointer if (segment->m_type == ENUM_STACK) { address_t rsp = get_rsp(segment); if (rsp >= segment->m_vaddr && rsp < segment->m_vaddr + segment->m_vsize) start = rsp; } next = ALIGN(start, ptr_sz); while (next + ptr_sz <= end) { address_t ptr; struct inuse_block* blk; if (!read_memory_wrapper(segment, next, &ptr, ptr_sz)) break; blk = find_inuse_block(ptr, blocks, total_blocks); if (blk) { unsigned long index = blk - blocks; set_queued_and_visited(qv_bitmap, index); } next += ptr_sz; } } } return CA_TRUE; }
/*
 * Capture main's initial frame and stack pointers into the globals
 * rbp_init/rsp_init, report them, then dump the current stack frame.
 */
int main()
{
	rbp_init = get_rbp();
	rsp_init = get_rsp();

	printf("stack pointer of main = %ld\n", rsp_init);
	printf("frame pointer of main = %ld\n", rbp_init);

	print_stack_frame();
	// recur(1);
}
/*
 * Debugger (ddb) accessor for the %rsp pseudo-variable.
 *
 * Returns 0 when no trapped frame is available, 1 otherwise.  On
 * DB_VAR_GET the saved stack pointer of the current kdb frame is stored
 * through valuep; on a set operation the frame's tf_rsp is only written
 * when ISPL(tf_cs) is true.
 * NOTE(review): ISPL appears to test the privilege level of the saved
 * %cs (i.e. only user-mode frames carry a meaningful tf_rsp) -- confirm
 * against the platform's trapframe definition.
 */
static int db_rsp(struct db_variable *vp, db_expr_t *valuep, int op)
{

	if (kdb_frame == NULL)
		return (0);

	if (op == DB_VAR_GET)
		*valuep = get_rsp(kdb_frame);
	else if (ISPL(kdb_frame->tf_cs))
		kdb_frame->tf_rsp = *valuep;
	return (1);
}
/*
 * Walk the saved-%rbp frame chain and print the return address (%rip) of
 * each frame, stopping when the chain leaves the region below
 * __kernel_start__ or hits a NULL frame pointer.
 *
 * Fix: the original passed get_rsp()/get_pc() and (offset_t)rip straight
 * to "%X", which expects exactly an unsigned int -- a variadic
 * argument/specifier mismatch is undefined behavior whenever offset_t is
 * wider than int.  Each argument is now cast to unsigned int to match the
 * specifier (byte-identical output on a 32-bit target).
 * NOTE(review): assumes a 32-bit address space; on 64-bit these casts
 * truncate the printed value -- switch to %lx/%p there.
 */
void stack_trace() {
	offset_t *rbp, *rip;

	printf("\n------ Setup Stack Trace [rsp: 0x%X | rip 0x%X]\n",
	       (unsigned int)get_rsp(), (unsigned int)get_pc());

	rbp = (offset_t*)get_rbp();
	// Each frame: [rbp] = caller's rbp, [rbp+1] = return address
	while(rbp && rbp < &__kernel_start__) {
		rip = (offset_t*)(*(rbp+1));
		rbp = (offset_t*)(*rbp);
		printf("%X\n", (unsigned int)(offset_t)rip);
	}
}
int recur(int x){ count++; get_rbp_2; get_rsp_2; printf("%rbp is %ld, using register local var it's %ld\n",get_rbp(),rbp); printf("%rsp is %ld, using register local var it's %ld\n",get_rsp(),rsp); printf("rbp difference is %ld\n",rbp_init-rbp); printf("rsp difference is %ld\n",rsp_init-rsp); printf("recursion depth is %ld\n",((rsp_init-rsp)/48)); printf("frame number (using rsp) %ld\n",frameCount()); printf("frame number (using rbp) %ld\n",(rbp_init-getFP().rbp)/48); if(x<0){ return -1; } else { return recur(recur(x+x)); } }
/*
 * Find/display global/local variables which own the most heap memory in bytes
 *
 * num                  - size of the top-owners list to maintain/print
 * all_reachable_blocks - passed through to calc_aggregate_size(); when
 *                        false an extra pass treats each in-use heap block
 *                        itself as a candidate owner (indirect ownership)
 * Returns CA_TRUE on success, CA_FALSE on bad argument, allocation
 * failure, empty heap, or user interrupt.
 */
CA_BOOL biggest_heap_owners_generic(unsigned int num, CA_BOOL all_reachable_blocks)
{
	CA_BOOL rc = CA_FALSE;
	unsigned int i;
	int nregs = 0;
	struct reg_value *regs_buf = NULL;
	size_t ptr_sz = g_ptr_bit >> 3;	// pointer size in bytes
	struct heap_owner *owners;	// top-`num` list, sorted by add_owner()
	struct heap_owner *smallest;	// last slot = current admission threshold
	struct ca_segment *segment;
	size_t total_bytes = 0;
	size_t processed_bytes = 0;
	struct inuse_block *inuse_blocks = NULL;
	unsigned long num_inuse_blocks;
	unsigned long inuse_index;
	struct inuse_block *blk;
	struct object_reference ref;
	size_t aggr_size;
	unsigned long aggr_count;
	address_t start, end, cursor;

	// Allocate an array for the biggest num of owners
	if (num == 0)
		return CA_FALSE;
	owners = (struct heap_owner *) calloc(num, sizeof(struct heap_owner));
	if (!owners)
		goto clean_out;
	smallest = &owners[num - 1];

	// First, create and populate an array of all in-use blocks
	inuse_blocks = build_inuse_heap_blocks(&num_inuse_blocks);
	if (!inuse_blocks || num_inuse_blocks == 0) {
		CA_PRINT("Failed: no in-use heap block is found\n");
		goto clean_out;
	}

	// estimate the work to enable progress bar
	for (i=0; i<g_segment_count; i++) {
		segment = &g_segments[i];
		if (segment->m_type == ENUM_STACK || segment->m_type == ENUM_MODULE_DATA)
			total_bytes += segment->m_fsize;
	}
	init_progress_bar(total_bytes);

	// Walk through all segments of threads' registers/stacks or globals
	for (i=0; i<g_segment_count; i++) {
		// bail out if user is impatient for the long searching
		if (user_request_break()) {
			CA_PRINT("Abort searching biggest heap memory owners\n");
			goto clean_out;
		}
		// Only thread stack and global .data sections are considered
		segment = &g_segments[i];
		if (segment->m_type == ENUM_STACK || segment->m_type == ENUM_MODULE_DATA) {
			int tid = 0;
			// check registers if it is a thread's stack segment
			if (segment->m_type == ENUM_STACK) {
				tid = get_thread_id (segment);
				// allocate register value buffer for once
				// (first call with NULL buffer only queries the register count)
				if (!nregs && !regs_buf) {
					nregs = read_registers (NULL, NULL, 0);
					if (nregs)
						regs_buf = (struct reg_value*) malloc(nregs * sizeof(struct reg_value));
				}
				// check each register for heap reference
				if (nregs && regs_buf) {
					int k;
					int nread = read_registers (segment, regs_buf, nregs);
					for (k = 0; k < nread; k++) {
						// only pointer-sized registers can hold a heap address
						if (regs_buf[k].reg_width == ptr_sz) {
							blk = find_inuse_block(regs_buf[k].value, inuse_blocks, num_inuse_blocks);
							if (blk) {
								// register k of thread `tid` points into block `blk`
								ref.storage_type = ENUM_REGISTER;
								ref.vaddr = 0;
								ref.value = blk->addr;
								ref.where.reg.tid = tid;
								ref.where.reg.reg_num = k;
								ref.where.reg.name = NULL;
								calc_aggregate_size(&ref, ptr_sz, all_reachable_blocks, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
								// NOTE(review): strict '>' here vs '>=' in the
								// memory scans below -- confirm the asymmetry
								// is intentional
								if (aggr_size > smallest->aggr_size) {
									struct heap_owner newowner;
									newowner.ref = ref;
									newowner.aggr_size = aggr_size;
									newowner.aggr_count = aggr_count;
									add_owner(owners, num, &newowner);
								}
							}
						}
					}
				}
			}
			// Calculate the memory region to search
			if (segment->m_type == ENUM_STACK) {
				// scan only the live part of the stack, i.e. from rsp up;
				// fall back to the segment base if rsp looks invalid
				start = get_rsp(segment);
				if (start < segment->m_vaddr || start >= segment->m_vaddr + segment->m_vsize)
					start = segment->m_vaddr;
				if (start - segment->m_vaddr >= segment->m_fsize)
					end = start;	// nothing of the file-backed part left to scan
				else
					end = segment->m_vaddr + segment->m_fsize;
			} else if (segment->m_type == ENUM_MODULE_DATA) {
				start = segment->m_vaddr;
				end = segment->m_vaddr + segment->m_fsize;
			} else
				continue;

			// Evaluate each variable or raw pointer in the target memory region
			cursor = ALIGN(start, ptr_sz);
			while (cursor < end) {
				size_t val_len = ptr_sz;	// bytes consumed by this candidate
				address_t sym_addr;
				size_t sym_sz;
				CA_BOOL known_sym = CA_FALSE;

				// If the address belongs to a known variable, include all its subfields
				// FIXME
				// consider subfields that are of pointer-like types, however, it will miss
				// references in an unstructured buffer
				ref.storage_type = segment->m_type;
				ref.vaddr = cursor;
				if (segment->m_type == ENUM_STACK) {
					ref.where.stack.tid = tid;
					ref.where.stack.frame = get_frame_number(segment, cursor, &ref.where.stack.offset);
					if (known_stack_sym(&ref, &sym_addr, &sym_sz) && sym_sz)
						known_sym = CA_TRUE;
				} else if (segment->m_type == ENUM_MODULE_DATA) {
					ref.where.module.base = segment->m_vaddr;
					ref.where.module.size = segment->m_vsize;
					ref.where.module.name = segment->m_module_name;
					if (known_global_sym(&ref, &sym_addr, &sym_sz) && sym_sz)
						known_sym = CA_TRUE;
				}
				if (known_sym) {
					if (cursor != sym_addr)
						ref.vaddr = cursor = sym_addr;	// we should never come to here!
					val_len = sym_sz;	// treat the whole symbol as one candidate
				}
				// Query heap for aggregated memory size/count originated from the candidate variable
				if (val_len >= ptr_sz) {
					calc_aggregate_size(&ref, val_len, all_reachable_blocks, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
					// update the top list if applies
					if (aggr_size >= smallest->aggr_size) {
						struct heap_owner newowner;
						// for a raw pointer-sized slot, record the pointer value itself
						if (val_len == ptr_sz)
							read_memory_wrapper(NULL, ref.vaddr, (void*)&ref.value, ptr_sz);
						else
							ref.value = 0;
						newowner.ref = ref;
						newowner.aggr_size = aggr_size;
						newowner.aggr_count = aggr_count;
						add_owner(owners, num, &newowner);
					}
				}
				cursor = ALIGN(cursor + val_len, ptr_sz);
			}
			processed_bytes += segment->m_fsize;
			set_current_progress(processed_bytes);
		}
	}
	end_progress_bar();

	if (!all_reachable_blocks) {
		// Big memory blocks may be referenced indirectly by local/global variables
		// check all in-use blocks
		for (inuse_index = 0; inuse_index < num_inuse_blocks; inuse_index++) {
			blk = &inuse_blocks[inuse_index];
			ref.storage_type = ENUM_HEAP;
			ref.vaddr = blk->addr;
			ref.where.heap.addr = blk->addr;
			ref.where.heap.size = blk->size;
			ref.where.heap.inuse = 1;
			calc_aggregate_size(&ref, ptr_sz, CA_FALSE, inuse_blocks, num_inuse_blocks, &aggr_size, &aggr_count);
			// update the top list if applies
			if (aggr_size >= smallest->aggr_size) {
				struct heap_owner newowner;
				ref.value = 0;
				newowner.ref = ref;
				newowner.aggr_size = aggr_size;
				newowner.aggr_count = aggr_count;
				add_owner(owners, num, &newowner);
			}
		}
	}

	// Print the result
	for (i = 0; i < num; i++) {
		struct heap_owner *owner = &owners[i];
		if (owner->aggr_size) {
			CA_PRINT("[%d] ", i+1);
			print_ref(&owner->ref, 0, CA_FALSE, CA_FALSE);
			CA_PRINT(" |--> ");
			print_size(owner->aggr_size);
			CA_PRINT(" (%ld blocks)\n", owner->aggr_count);
		}
	}
	rc = CA_TRUE;

clean_out:
	// clean up
	if (regs_buf)
		free (regs_buf);
	if (owners)
		free (owners);
	if (inuse_blocks)
		free_inuse_heap_blocks (inuse_blocks, num_inuse_blocks);
	return rc;
}