int
contained_in (const struct block *a, const struct block *b)
{
  if (!a || !b)
    return 0;
  return BLOCK_START (a) >= BLOCK_START (b)
    && BLOCK_END (a) <= BLOCK_END (b);
}
/* Print out the free list structure.  */
int
print_heap (void)
{
  int i, j;
  int total_size = 0;
  free_block_t *fbp, *ptr;

  if (!HEAP_INIT)   /* Make sure RecoverableHeapStartAddress is inited.  */
    return -1;

  START_CRITICAL;
  {
    printf ("Heap starts at %lx, uses %ld sized chunks, and uses %ld of %ld lists\n",
            (long) RecoverableHeapStartAddress, RDS_CHUNK_SIZE,
            RDS_MAXLIST, RDS_NLISTS);
    for (i = 1; i < RDS_NLISTS + 1; i++)
      {
        printf ("list %d %c\n", i, ((i == RDS_MAXLIST) ? '+' : ' '));
        fbp = RDS_FREE_LIST[i].head;
        if (RDS_FREE_LIST[i].guard != FREE_LIST_GUARD)
          printf ("Bad guard on list %d!!!\n", i);
        if (fbp && (fbp->prev != (free_block_t *) NULL))
          printf ("Non-null initial prev pointer.\n");
        j = 1;
        while (fbp != NULL)
          {
            printf ("%d block %lx, size %ld\n", j++, (long) fbp, fbp->size);
            total_size += fbp->size;
            if (fbp->type != FREE_GUARD)
              printf ("Bad lowguard on block\n");
            if ((*BLOCK_END (fbp)) != END_GUARD)
              printf ("Bad highguard, %p=%lx\n",
                      BLOCK_END (fbp), *BLOCK_END (fbp));
            ptr = fbp->next;
            if (ptr && (ptr->prev != fbp))
              printf ("Bad chain link %lx <-> %lx\n", (long) fbp, (long) ptr);
            if (i != RDS_MAXLIST && fbp->size != i)
              printf ("OBJECT IS ON WRONG LIST!!!!\n");
            fbp = fbp->next;
          }
      }
  }
  END_CRITICAL;
  printf ("Sum of sizes of objects in free lists is %d.\n", total_size);
  return 0;
}
static bool IsInBlockInfo(void *block, size_t size)
{
    t_BlockInfo *p_blockinfo;
    byte *p_block = (byte *)block;
    byte *p_virtual_end;
    byte *p_actual_end;

    ASSERT(NULL != p_block && 0 != size);

    p_blockinfo = GetBlockInfo(p_block);
    p_virtual_end = BLOCK_END(p_block, size);
    p_actual_end = BLOCK_END(p_blockinfo->m_begin, p_blockinfo->m_size);

    if (p_virtual_end > p_actual_end)
        return false;
    else
        return true;
}
struct frame_info *
block_innermost_frame (struct block *block)
{
  struct frame_info *frame;
  CORE_ADDR start;
  CORE_ADDR end;
  CORE_ADDR calling_pc;

  if (block == NULL)
    return NULL;

  start = BLOCK_START (block);
  end = BLOCK_END (block);

  frame = NULL;
  while (1)
    {
      frame = get_prev_frame (frame);
      if (frame == NULL)
        return NULL;
      calling_pc = get_frame_address_in_block (frame);
      if (calling_pc >= start && calling_pc < end)
        return frame;
    }
}
void GeneralHandler::flushNetwork()
{
    if (!mNetwork)
        return;

    BLOCK_START("GeneralHandler::flushNetwork 1")
    mNetwork->flush();
    BLOCK_END("GeneralHandler::flushNetwork 1")

    mNetwork->dispatchMessages();

    BLOCK_START("GeneralHandler::flushNetwork 3")
    if (mNetwork->getState() == Network::NET_ERROR)
    {
        if (!mNetwork->getError().empty())
        {
            errorMessage = mNetwork->getError();
        }
        else
        {
            // TRANSLATORS: error message
            errorMessage = _("Got disconnected from server!");
        }
        Client::setState(STATE_ERROR);
    }
    BLOCK_END("GeneralHandler::flushNetwork 3")
}
int
inside_main_func (CORE_ADDR pc)
{
  if (pc == 0)
    return 1;
  if (symfile_objfile == 0)
    return 0;

  /* If the addr range is not set up at symbol reading time, set it up
     now.  This is for FRAME_CHAIN_VALID_ALTERNATE.  I do this for coff,
     because it is unable to set it up at symbol reading time.  */
  if (symfile_objfile->ei.main_func_lowpc == INVALID_ENTRY_LOWPC
      && symfile_objfile->ei.main_func_highpc == INVALID_ENTRY_HIGHPC)
    {
      struct symbol *mainsym;

      mainsym = lookup_symbol (main_name (), NULL, VAR_NAMESPACE, NULL, NULL);
      if (mainsym && SYMBOL_CLASS (mainsym) == LOC_BLOCK)
        {
          symfile_objfile->ei.main_func_lowpc =
            BLOCK_START (SYMBOL_BLOCK_VALUE (mainsym));
          symfile_objfile->ei.main_func_highpc =
            BLOCK_END (SYMBOL_BLOCK_VALUE (mainsym));
        }
    }
  return (symfile_objfile->ei.main_func_lowpc <= pc
          && symfile_objfile->ei.main_func_highpc > pc);
}
struct frame_info *
block_innermost_frame (const struct block *block)
{
  struct frame_info *frame;
  CORE_ADDR start;
  CORE_ADDR end;

  if (block == NULL)
    return NULL;

  start = BLOCK_START (block);
  end = BLOCK_END (block);

  frame = get_selected_frame_if_set ();
  if (frame == NULL)
    frame = get_current_frame ();
  while (frame != NULL)
    {
      struct block *frame_block = get_frame_block (frame, NULL);

      if (frame_block != NULL && contained_in (frame_block, block))
        return frame;

      frame = get_prev_frame (frame);
    }

  return NULL;
}
/* Return 1 if any of the address ranges for block BL begins with START
   and any of the address ranges for BL ends with END; return 0
   otherwise.  */

int
block_starts_and_ends (struct block *bl, CORE_ADDR start, CORE_ADDR end)
{
  int retval;
  int start_found = 0;
  int end_found = 0;

  if (!BLOCK_RANGES (bl))
    retval = BLOCK_START (bl) == start && BLOCK_END (bl) == end;
  else
    {
      int i;

      /* Keep scanning until both a matching start and a matching end
         have been seen; they need not come from the same range.  */
      for (i = 0;
           i < BLOCK_RANGES (bl)->nelts && !(start_found && end_found);
           i++)
        {
          if (BLOCK_RANGE_START (bl, i) == start)
            start_found = 1;
          if (BLOCK_RANGE_END (bl, i) == end)
            end_found = 1;
        }

      retval = start_found && end_found;
    }

  return retval;
}
static PyObject *
blpy_get_end (PyObject *self, void *closure)
{
  const struct block *block = NULL;

  BLPY_REQUIRE_VALID (self, block);

  return gdb_py_object_from_ulongest (BLOCK_END (block));
}
static PyObject *
blpy_get_end (PyObject *self, void *closure)
{
  struct block *block = NULL;

  BLPY_REQUIRE_VALID (self, block);

  return PyLong_FromUnsignedLongLong (BLOCK_END (block));
}
void check_block(malloc_block_t* block) {
    /* Header magic, footer magic and footer backlink must all agree.
       Use short-circuiting || so the footer is not examined when the
       header is already known to be corrupt.  */
    if ((block->magic != MALLOC_MAGIC)
        || (BLOCK_END(block)->magic != MALLOC_MAGIC)
        || (BLOCK_END(block)->backlink != block)) {
        kpanic("corrupted malloc data");
    }
    if ((block->flags & MALLOC_LAST) == 0) {
        malloc_block_t* next = BLOCK_NEXT(block);
        if ((next->magic != MALLOC_MAGIC) || (next->flags & MALLOC_FIRST)) {
            kpanic("corrupted malloc chain");
        }
    }
    if ((block->flags & MALLOC_FIRST) == 0) {
        malloc_block_t* prev = BLOCK_PREVIOUS(block);
        if ((prev->magic != MALLOC_MAGIC) || (prev->flags & MALLOC_LAST)) {
            kpanic("corrupted malloc chain");
        }
    }
}
struct blockvector *
blockvector_for_pc_sect (CORE_ADDR pc, struct bfd_section *section,
                         int *pindex, struct symtab *symtab)
{
  struct block *b;
  int bot, top, half;
  struct blockvector *bl;

  if (symtab == 0)              /* if no symtab specified by caller */
    {
      /* First search all symtabs for one whose file contains our pc.  */
      symtab = find_pc_sect_symtab (pc, section);
      if (symtab == 0)
        return 0;
    }

  bl = BLOCKVECTOR (symtab);
  b = BLOCKVECTOR_BLOCK (bl, 0);

  /* Then search that symtab for the smallest block that wins.  */

  /* Use binary search to find the last block that starts before PC.  */
  bot = 0;
  top = BLOCKVECTOR_NBLOCKS (bl);

  while (top - bot > 1)
    {
      half = (top - bot + 1) >> 1;
      b = BLOCKVECTOR_BLOCK (bl, bot + half);
      if (BLOCK_START (b) <= pc)
        bot += half;
      else
        top = bot + half;
    }

  /* Now search backward for a block that ends after PC.  */
  while (bot >= 0)
    {
      b = BLOCKVECTOR_BLOCK (bl, bot);
      if (BLOCK_END (b) > pc)
        {
          if (pindex)
            *pindex = bot;
          return bl;
        }
      bot--;
    }
  return 0;
}
void setup_block(malloc_block_t* block, uint32_t flags, uint32_t size) {
#ifdef MALLOC_DEBUG
    block->magic = MALLOC_MAGIC;
#endif
    block->flags = flags;
    block->size = size;
    malloc_block_end_t* block_end = BLOCK_END(block);
#ifdef MALLOC_DEBUG
    block_end->magic = MALLOC_MAGIC;
#endif
    block_end->backlink = block;
}
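/* The layout implied by setup_block() and check_block() above is a header
   (malloc_block_t), then `size` payload bytes, then a footer
   (malloc_block_end_t) carrying a backlink and, in debug builds, a magic
   value; the footer backlink is what lets BLOCK_PREVIOUS walk backwards in
   O(1).  The declarations and macros below are NOT from the original
   allocator; they are a minimal sketch of how BLOCK_END, BLOCK_NEXT and
   BLOCK_PREVIOUS could be derived from that layout, with hypothetical
   struct definitions and field types.  */

#include <stdint.h>

typedef struct malloc_block
{
    uint32_t magic;   /* MALLOC_MAGIC; the real code may guard this with MALLOC_DEBUG */
    uint32_t flags;   /* MALLOC_FIRST / MALLOC_LAST chain markers */
    uint32_t size;    /* payload size in bytes */
} malloc_block_t;

typedef struct malloc_block_end
{
    uint32_t magic;                 /* trailing guard value */
    struct malloc_block *backlink;  /* points back to the owning header */
} malloc_block_end_t;

/* Footer lives immediately after the payload bytes.  */
#define BLOCK_END(b) \
    ((malloc_block_end_t *)((uint8_t *)((b) + 1) + (b)->size))

/* The next header starts right after this block's footer.  */
#define BLOCK_NEXT(b) \
    ((malloc_block_t *)((uint8_t *)BLOCK_END(b) + sizeof(malloc_block_end_t)))

/* The previous block's footer sits just before this header; follow its
   backlink to reach the previous header.  */
#define BLOCK_PREVIOUS(b) \
    ((((malloc_block_end_t *)(void *)(b)) - 1)->backlink)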
CORE_ADDR
block_highest_pc (const struct block *bl)
{
  int i;
  CORE_ADDR highest = 0;

  if (BLOCK_RANGES (bl) == NULL)
    return BLOCK_END (bl);

  for (i = 0; i < BLOCK_RANGES (bl)->nelts; i++)
    {
      if (highest < BLOCK_RANGE_END (bl, i))
        highest = BLOCK_RANGE_END (bl, i);
    }

  return highest;
}
void
TclpFree(
    char *oldPtr)		/* Pointer to memory to free. */
{
    register long size;
    register union overhead *overPtr;
    struct block *bigBlockPtr;

    if (oldPtr == NULL) {
	return;
    }

    Tcl_MutexLock(allocMutexPtr);
    overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));

    ASSERT(overPtr->overMagic0 == MAGIC);	/* make sure it was in use */
    ASSERT(overPtr->overMagic1 == MAGIC);
    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }

    RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
    RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
    size = overPtr->bucketIndex;
    if (size == 0xff) {
#ifdef MSTATS
	numMallocs[NBUCKETS]--;
#endif
	bigBlockPtr = (struct block *) overPtr - 1;
	bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
	TclpSysFree(bigBlockPtr);
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }
    ASSERT(size < NBUCKETS);
    overPtr->next = nextf[size];	/* also clobbers overMagic */
    nextf[size] = overPtr;
#ifdef MSTATS
    numMallocs[size]--;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}
int
block_contains_pc (const struct block *bl, CORE_ADDR pc)
{
  int i;
  int contains_pc = 0;

  if (! BLOCK_RANGES (bl))
    /* No range list; just a low & high address.  */
    contains_pc = BLOCK_START (bl) <= pc && BLOCK_END (bl) > pc;
  else
    for (i = 0; i < BLOCK_RANGES (bl)->nelts && !contains_pc; i++)
      if (BLOCK_RANGE_START (bl, i) <= pc
          && BLOCK_RANGE_END (bl, i) > pc)
        contains_pc = 1;

  return contains_pc;
}
static t_BlockInfo* GetBlockInfo(byte *block)
{
    t_BlockInfo *p_blockinfo;
    byte *p_begin;
    byte *p_end;

    ASSERT(NULL != block);

    for (p_blockinfo = g_head; NULL != p_blockinfo;
         p_blockinfo = p_blockinfo->m_next)
    {
        p_begin = BLOCK_BEGIN(p_blockinfo->m_begin);
        p_end = BLOCK_END(p_blockinfo->m_begin, p_blockinfo->m_size);

        if (p_begin > block || p_end < block)
            continue;
        else
            break;
    }

    ASSERT(NULL != p_blockinfo);

    return p_blockinfo;
}
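/* IsInBlockInfo() and GetBlockInfo() above treat a tracked block as the
   inclusive byte range [BLOCK_BEGIN(begin), BLOCK_END(begin, size)], since
   membership is tested with p_begin > block || p_end < block.  The
   definitions below are not taken from the original source; they are one
   plausible reading, under the assumption that the two-argument BLOCK_END
   yields a pointer to the last valid byte of the block.  t_BlockInfo is
   likewise sketched with only the fields these snippets touch.  */

#include <stddef.h>

typedef unsigned char byte;

typedef struct t_BlockInfo
{
    byte *m_begin;               /* start of the tracked user block */
    size_t m_size;               /* size of the user block in bytes */
    struct t_BlockInfo *m_next;  /* next node in the global g_head list */
} t_BlockInfo;

#define BLOCK_BEGIN(p)      ((byte *)(p))
#define BLOCK_END(p, size)  ((byte *)(p) + (size) - 1)   /* last valid byte */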
static struct block *
find_block_in_blockvector (struct blockvector *bl, CORE_ADDR pc)
{
  struct block *b;
  int bot, top, half;

  /* If we have an addrmap mapping code addresses to blocks, then use
     that.  */
  if (BLOCKVECTOR_MAP (bl))
    return addrmap_find (BLOCKVECTOR_MAP (bl), pc);

  /* Otherwise, use binary search to find the last block that starts
     before PC.
     Note: GLOBAL_BLOCK is block 0, STATIC_BLOCK is block 1.
     They both have the same START,END values.
     Historically this code would choose STATIC_BLOCK over GLOBAL_BLOCK but
     the fact that this choice was made was subtle, now we make it
     explicit.  */

  gdb_assert (BLOCKVECTOR_NBLOCKS (bl) >= 2);
  bot = STATIC_BLOCK;
  top = BLOCKVECTOR_NBLOCKS (bl);

  while (top - bot > 1)
    {
      half = (top - bot + 1) >> 1;
      b = BLOCKVECTOR_BLOCK (bl, bot + half);
      if (BLOCK_START (b) <= pc)
        bot += half;
      else
        top = bot + half;
    }

  /* Now search backward for a block that ends after PC.  */
  while (bot >= STATIC_BLOCK)
    {
      b = BLOCKVECTOR_BLOCK (bl, bot);
      if (BLOCK_END (b) > pc)
        return b;
      bot--;
    }

  return NULL;
}
struct block *
allocate_block (struct obstack *obstack)
{
  struct block *bl = (struct block *)
    obstack_alloc (obstack, sizeof (struct block));

  BLOCK_START (bl) = 0;
  BLOCK_END (bl) = 0;
  BLOCK_FUNCTION (bl) = NULL;
  BLOCK_SUPERBLOCK (bl) = NULL;
  BLOCK_DICT (bl) = NULL;
  BLOCK_NAMESPACE (bl) = NULL;
  BLOCK_GCC_COMPILED (bl) = 0;
  /* APPLE LOCAL begin address ranges  */
  BLOCK_RANGES (bl) = NULL;
  /* APPLE LOCAL end address ranges  */

  return bl;
}
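/* The GDB snippets in this section use BLOCK_START/BLOCK_END both as
   values and, in allocate_block() above, as assignment targets, so the
   accessors must expand to struct fields.  The sketch below is NOT GDB's
   actual declaration (field names, the ranges representation, and the
   Apple-local pieces vary across versions, and BLOCK_DICT, BLOCK_NAMESPACE
   and BLOCK_GCC_COMPILED would simply be further fields omitted here); it
   only illustrates the shape such accessors take.  */

struct symbol;

typedef unsigned long long CORE_ADDR;   /* width assumed for illustration */

struct address_range
{
  CORE_ADDR startaddr;
  CORE_ADDR endaddr;
};

struct address_range_list
{
  int nelts;                      /* number of entries in RANGES */
  struct address_range ranges[1]; /* variable-length in practice */
};

struct block
{
  CORE_ADDR startaddr;            /* lowest code address in the block */
  CORE_ADDR endaddr;              /* one past the highest code address */
  struct symbol *function;        /* set for function blocks */
  struct block *superblock;       /* enclosing block, if any */
  struct address_range_list *ranges;  /* NULL for contiguous blocks */
};

#define BLOCK_START(bl)           ((bl)->startaddr)
#define BLOCK_END(bl)             ((bl)->endaddr)
#define BLOCK_FUNCTION(bl)        ((bl)->function)
#define BLOCK_SUPERBLOCK(bl)      ((bl)->superblock)
#define BLOCK_RANGES(bl)          ((bl)->ranges)
#define BLOCK_RANGE_START(bl, i)  ((bl)->ranges->ranges[(i)].startaddr)
#define BLOCK_RANGE_END(bl, i)    ((bl)->ranges->ranges[(i)].endaddr)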
static void
nlm_symfile_read (struct objfile *objfile, int mainline)
{
  bfd *abfd = objfile->obfd;
  struct cleanup *back_to;
  CORE_ADDR offset;
  struct symbol *mainsym;

  init_minimal_symbol_collection ();
  back_to = make_cleanup_discard_minimal_symbols ();

  /* FIXME, should take a section_offsets param, not just an offset.  */
  offset = ANOFFSET (objfile->section_offsets, 0);

  /* Process the NLM export records, which become the bfd's canonical
     symbol table.  */
  nlm_symtab_read (abfd, offset, objfile);

  /* Install any minimal symbols that have been collected as the current
     minimal symbols for this objfile.  */
  install_minimal_symbols (objfile);
  do_cleanups (back_to);

  stabsect_build_psymtabs (objfile, mainline, ".stab", ".stabstr", ".text");

  mainsym = lookup_symbol (main_name (), NULL, VAR_DOMAIN, NULL, NULL);

  if (mainsym && SYMBOL_CLASS (mainsym) == LOC_BLOCK)
    {
      objfile->ei.main_func_lowpc = BLOCK_START (SYMBOL_BLOCK_VALUE (mainsym));
      objfile->ei.main_func_highpc = BLOCK_END (SYMBOL_BLOCK_VALUE (mainsym));
    }

  /* FIXME: We could locate and read the optional native debugging format
     here and add the symbols to the minimal symbol table.  */
}
struct frame_info *
block_innermost_frame (struct block *block)
{
  struct frame_info *frame;
  register CORE_ADDR start;
  register CORE_ADDR end;

  if (block == NULL)
    return NULL;

  start = BLOCK_START (block);
  end = BLOCK_END (block);

  frame = NULL;
  while (1)
    {
      frame = get_prev_frame (frame);
      if (frame == NULL)
        return NULL;
      if (frame->pc >= start && frame->pc < end)
        return frame;
    }
}
char * TclpRealloc( char *oldPtr, /* Pointer to alloced block. */ unsigned int numBytes) /* New size of memory. */ { int i; union overhead *overPtr; struct block *bigBlockPtr; int expensive; unsigned long maxSize; if (oldPtr == NULL) { return TclpAlloc(numBytes); } Tcl_MutexLock(allocMutexPtr); overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead)); ASSERT(overPtr->overMagic0 == MAGIC); /* make sure it was in use */ ASSERT(overPtr->overMagic1 == MAGIC); if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) { Tcl_MutexUnlock(allocMutexPtr); return NULL; } RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC); RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC); i = overPtr->bucketIndex; /* * If the block isn't in a bin, just realloc it. */ if (i == 0xff) { struct block *prevPtr, *nextPtr; bigBlockPtr = (struct block *) overPtr - 1; prevPtr = bigBlockPtr->prevPtr; nextPtr = bigBlockPtr->nextPtr; bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr, sizeof(struct block) + OVERHEAD + numBytes); if (bigBlockPtr == NULL) { Tcl_MutexUnlock(allocMutexPtr); return NULL; } if (prevPtr->nextPtr != bigBlockPtr) { /* * If the block has moved, splice the new block into the list * where the old block used to be. */ prevPtr->nextPtr = bigBlockPtr; nextPtr->prevPtr = bigBlockPtr; } overPtr = (union overhead *) (bigBlockPtr + 1); #ifdef MSTATS numMallocs[NBUCKETS]++; #endif #ifdef RCHECK /* * Record allocated size of block and update magic number bounds. */ overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); BLOCK_END(overPtr) = RMAGIC; #endif Tcl_MutexUnlock(allocMutexPtr); return (char *)(overPtr+1); } maxSize = 1 << (i+3); expensive = 0; if (numBytes+OVERHEAD > maxSize) { expensive = 1; } else if (i>0 && numBytes+OVERHEAD < maxSize/2) { expensive = 1; } if (expensive) { void *newPtr; Tcl_MutexUnlock(allocMutexPtr); newPtr = TclpAlloc(numBytes); if (newPtr == NULL) { return NULL; } maxSize -= OVERHEAD; if (maxSize < numBytes) { numBytes = maxSize; } memcpy(newPtr, oldPtr, (size_t) numBytes); TclpFree(oldPtr); return newPtr; } /* * Ok, we don't have to copy, it fits as-is */ #ifdef RCHECK overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); BLOCK_END(overPtr) = RMAGIC; #endif Tcl_MutexUnlock(allocMutexPtr); return(oldPtr); }
int
contained_in (const struct block *a, const struct block *b)
{
  int i, j;

  if (!a || !b)
    return 0;

  /* APPLE LOCAL begin address ranges  */
  if (BLOCK_RANGES (a) == NULL && BLOCK_RANGES (b) == NULL)
    {
      /* APPLE LOCAL end address ranges  */
      return BLOCK_START (a) >= BLOCK_START (b)
        && BLOCK_END (a) <= BLOCK_END (b);
      /* APPLE LOCAL begin address ranges  */
    }
  else if (!BLOCK_RANGES (a))
    {
      /* Block A has a single contiguous address range, but block B has
         multiple non-contiguous ranges.  A is contained in B if A's
         address range fits within ANY of B's address ranges.  */
      for (i = 0; i < BLOCK_RANGES (b)->nelts; i++)
        if (BLOCK_START (a) >= BLOCK_RANGE_START (b, i)
            && BLOCK_END (a) <= BLOCK_RANGE_END (b, i))
          {
            return 1;   /* A's scope fits within one of B's ranges.  */
          }
      return 0;         /* A's scope did not fit within any of B's ranges.  */
    }
  else if (!BLOCK_RANGES (b))
    {
      /* Block B has a single contiguous address range, but block A has
         multiple non-contiguous ranges.  A is contained in B if ALL of
         A's address ranges fit within B's address range.  */
      for (i = 0; i < BLOCK_RANGES (a)->nelts; i++)
        if (BLOCK_RANGE_START (a, i) < BLOCK_START (b)
            || BLOCK_RANGE_END (a, i) > BLOCK_END (b))
          {
            return 0;   /* One of A's ranges is outside B's scope.  */
          }
      return 1;         /* All of A's ranges are within B's scope.  */
    }
  else
    {
      /* Both block A and block B have non-contiguous address ranges.
         A is contained in B if all of A's address ranges fit within at
         least one of B's address ranges.  */
      int fits;

      for (i = 0; i < BLOCK_RANGES (a)->nelts; i++)
        {
          fits = 0;
          for (j = 0; j < BLOCK_RANGES (b)->nelts && !fits; j++)
            if (BLOCK_RANGE_START (a, i) >= BLOCK_RANGE_START (b, j)
                && BLOCK_RANGE_END (a, i) <= BLOCK_RANGE_END (b, j))
              {
                fits = 1;
              }

          if (fits == 0)
            {
              /* One of A's ranges is not contained within any B range.  */
              return 0;
            }
        }
      return 1;         /* All of A's ranges are contained within B's ranges.  */
    }
  /* APPLE LOCAL end address ranges  */

  return 0;   /* notreached */
}
int inside_main_func (CORE_ADDR pc) { struct minimal_symbol *msymbol; if (symfile_objfile == 0) return 0; msymbol = lookup_minimal_symbol (main_name (), NULL, symfile_objfile); /* If the address range hasn't been set up at symbol reading time, set it up now. */ if (msymbol != NULL && symfile_objfile->ei.main_func_lowpc == INVALID_ENTRY_LOWPC && symfile_objfile->ei.main_func_highpc == INVALID_ENTRY_HIGHPC) { /* brobecker/2003-10-10: We used to rely on lookup_symbol() to search the symbol associated to the "main" function. Unfortunately, lookup_symbol() uses the current-language la_lookup_symbol_nonlocal function to do the global symbol search. Depending on the language, this can introduce certain side-effects, because certain languages, for instance Ada, may find more than one match. Therefore we prefer to search the "main" function symbol using its address rather than its name. */ struct symbol *mainsym = find_pc_function (SYMBOL_VALUE_ADDRESS (msymbol)); if (mainsym && SYMBOL_CLASS (mainsym) == LOC_BLOCK) { symfile_objfile->ei.main_func_lowpc = BLOCK_START (SYMBOL_BLOCK_VALUE (mainsym)); symfile_objfile->ei.main_func_highpc = BLOCK_END (SYMBOL_BLOCK_VALUE (mainsym)); } } /* Not in the normal symbol tables, see if "main" is in the partial symbol table. If it's not, then give up. */ if (msymbol != NULL && MSYMBOL_TYPE (msymbol) == mst_text) { CORE_ADDR maddr = SYMBOL_VALUE_ADDRESS (msymbol); asection *msect = SYMBOL_BFD_SECTION (msymbol); struct obj_section *osect = find_pc_sect_section (maddr, msect); if (osect != NULL) { int i; /* Step over other symbols at this same address, and symbols in other sections, to find the next symbol in this section with a different address. */ for (i = 1; SYMBOL_LINKAGE_NAME (msymbol + i) != NULL; i++) { if (SYMBOL_VALUE_ADDRESS (msymbol + i) != maddr && SYMBOL_BFD_SECTION (msymbol + i) == msect) break; } symfile_objfile->ei.main_func_lowpc = maddr; /* Use the lesser of the next minimal symbol in the same section, or the end of the section, as the end of the function. */ if (SYMBOL_LINKAGE_NAME (msymbol + i) != NULL && SYMBOL_VALUE_ADDRESS (msymbol + i) < osect->endaddr) symfile_objfile->ei.main_func_highpc = SYMBOL_VALUE_ADDRESS (msymbol + i); else /* We got the start address from the last msymbol in the objfile. So the end address is the end of the section. */ symfile_objfile->ei.main_func_highpc = osect->endaddr; } } return (symfile_objfile->ei.main_func_lowpc <= pc && symfile_objfile->ei.main_func_highpc > pc); }
int find_pc_sect_partial_function (CORE_ADDR pc, asection *section, char **name, CORE_ADDR *address, CORE_ADDR *endaddr) { struct partial_symtab *pst; struct symbol *f; struct minimal_symbol *msymbol; struct partial_symbol *psb; struct obj_section *osect; int i; CORE_ADDR mapped_pc; mapped_pc = overlay_mapped_address (pc, section); if (mapped_pc >= cache_pc_function_low && mapped_pc < cache_pc_function_high && section == cache_pc_function_section) goto return_cached_value; /* If sigtramp is in the u area, it counts as a function (especially important for step_1). */ if (SIGTRAMP_START_P () && PC_IN_SIGTRAMP (mapped_pc, (char *) NULL)) { cache_pc_function_low = SIGTRAMP_START (mapped_pc); cache_pc_function_high = SIGTRAMP_END (mapped_pc); cache_pc_function_name = "<sigtramp>"; cache_pc_function_section = section; goto return_cached_value; } msymbol = lookup_minimal_symbol_by_pc_section (mapped_pc, section); pst = find_pc_sect_psymtab (mapped_pc, section); if (pst) { /* Need to read the symbols to get a good value for the end address. */ if (endaddr != NULL && !pst->readin) { /* Need to get the terminal in case symbol-reading produces output. */ target_terminal_ours_for_output (); PSYMTAB_TO_SYMTAB (pst); } if (pst->readin) { /* Checking whether the msymbol has a larger value is for the "pathological" case mentioned in print_frame_info. */ f = find_pc_sect_function (mapped_pc, section); if (f != NULL && (msymbol == NULL || (BLOCK_START (SYMBOL_BLOCK_VALUE (f)) >= SYMBOL_VALUE_ADDRESS (msymbol)))) { cache_pc_function_low = BLOCK_START (SYMBOL_BLOCK_VALUE (f)); cache_pc_function_high = BLOCK_END (SYMBOL_BLOCK_VALUE (f)); cache_pc_function_name = DEPRECATED_SYMBOL_NAME (f); cache_pc_function_section = section; goto return_cached_value; } } else { /* Now that static symbols go in the minimal symbol table, perhaps we could just ignore the partial symbols. But at least for now we use the partial or minimal symbol, whichever is larger. */ psb = find_pc_sect_psymbol (pst, mapped_pc, section); if (psb && (msymbol == NULL || (SYMBOL_VALUE_ADDRESS (psb) >= SYMBOL_VALUE_ADDRESS (msymbol)))) { /* This case isn't being cached currently. */ if (address) *address = SYMBOL_VALUE_ADDRESS (psb); if (name) *name = DEPRECATED_SYMBOL_NAME (psb); /* endaddr non-NULL can't happen here. */ return 1; } } } /* Not in the normal symbol tables, see if the pc is in a known section. If it's not, then give up. This ensures that anything beyond the end of the text seg doesn't appear to be part of the last function in the text segment. */ osect = find_pc_sect_section (mapped_pc, section); if (!osect) msymbol = NULL; /* Must be in the minimal symbol table. */ if (msymbol == NULL) { /* No available symbol. */ if (name != NULL) *name = 0; if (address != NULL) *address = 0; if (endaddr != NULL) *endaddr = 0; return 0; } cache_pc_function_low = SYMBOL_VALUE_ADDRESS (msymbol); cache_pc_function_name = DEPRECATED_SYMBOL_NAME (msymbol); cache_pc_function_section = section; /* Use the lesser of the next minimal symbol in the same section, or the end of the section, as the end of the function. */ /* Step over other symbols at this same address, and symbols in other sections, to find the next symbol in this section with a different address. 
*/ for (i = 1; DEPRECATED_SYMBOL_NAME (msymbol + i) != NULL; i++) { if (SYMBOL_VALUE_ADDRESS (msymbol + i) != SYMBOL_VALUE_ADDRESS (msymbol) && SYMBOL_BFD_SECTION (msymbol + i) == SYMBOL_BFD_SECTION (msymbol)) break; } if (DEPRECATED_SYMBOL_NAME (msymbol + i) != NULL && SYMBOL_VALUE_ADDRESS (msymbol + i) < osect->endaddr) cache_pc_function_high = SYMBOL_VALUE_ADDRESS (msymbol + i); else /* We got the start address from the last msymbol in the objfile. So the end address is the end of the section. */ cache_pc_function_high = osect->endaddr; return_cached_value: if (address) { if (pc_in_unmapped_range (pc, section)) *address = overlay_unmapped_address (cache_pc_function_low, section); else *address = cache_pc_function_low; } if (name) *name = cache_pc_function_name; if (endaddr) { if (pc_in_unmapped_range (pc, section)) { /* Because the high address is actually beyond the end of the function (and therefore possibly beyond the end of the overlay), we must actually convert (high - 1) and then add one to that. */ *endaddr = 1 + overlay_unmapped_address (cache_pc_function_high - 1, section); } else *endaddr = cache_pc_function_high; } return 1; }
int find_pc_partial_function_gnu_ifunc (CORE_ADDR pc, const char **name, CORE_ADDR *address, CORE_ADDR *endaddr, int *is_gnu_ifunc_p) { struct obj_section *section; struct symbol *f; struct minimal_symbol *msymbol; struct symtab *symtab = NULL; struct objfile *objfile; int i; CORE_ADDR mapped_pc; /* To ensure that the symbol returned belongs to the correct setion (and that the last [random] symbol from the previous section isn't returned) try to find the section containing PC. First try the overlay code (which by default returns NULL); and second try the normal section code (which almost always succeeds). */ section = find_pc_overlay (pc); if (section == NULL) section = find_pc_section (pc); mapped_pc = overlay_mapped_address (pc, section); if (mapped_pc >= cache_pc_function_low && mapped_pc < cache_pc_function_high && section == cache_pc_function_section) goto return_cached_value; msymbol = lookup_minimal_symbol_by_pc_section (mapped_pc, section); ALL_OBJFILES (objfile) { if (objfile->sf) symtab = objfile->sf->qf->find_pc_sect_symtab (objfile, msymbol, mapped_pc, section, 0); if (symtab) break; } if (symtab) { /* Checking whether the msymbol has a larger value is for the "pathological" case mentioned in print_frame_info. */ f = find_pc_sect_function (mapped_pc, section); if (f != NULL && (msymbol == NULL || (BLOCK_START (SYMBOL_BLOCK_VALUE (f)) >= SYMBOL_VALUE_ADDRESS (msymbol)))) { cache_pc_function_low = BLOCK_START (SYMBOL_BLOCK_VALUE (f)); cache_pc_function_high = BLOCK_END (SYMBOL_BLOCK_VALUE (f)); cache_pc_function_name = SYMBOL_LINKAGE_NAME (f); cache_pc_function_section = section; cache_pc_function_is_gnu_ifunc = TYPE_GNU_IFUNC (SYMBOL_TYPE (f)); goto return_cached_value; } } /* Not in the normal symbol tables, see if the pc is in a known section. If it's not, then give up. This ensures that anything beyond the end of the text seg doesn't appear to be part of the last function in the text segment. */ if (!section) msymbol = NULL; /* Must be in the minimal symbol table. */ if (msymbol == NULL) { /* No available symbol. */ if (name != NULL) *name = 0; if (address != NULL) *address = 0; if (endaddr != NULL) *endaddr = 0; if (is_gnu_ifunc_p != NULL) *is_gnu_ifunc_p = 0; return 0; } cache_pc_function_low = SYMBOL_VALUE_ADDRESS (msymbol); cache_pc_function_name = SYMBOL_LINKAGE_NAME (msymbol); cache_pc_function_section = section; cache_pc_function_is_gnu_ifunc = MSYMBOL_TYPE (msymbol) == mst_text_gnu_ifunc; /* If the minimal symbol has a size, use it for the cache. Otherwise use the lesser of the next minimal symbol in the same section, or the end of the section, as the end of the function. */ if (MSYMBOL_SIZE (msymbol) != 0) cache_pc_function_high = cache_pc_function_low + MSYMBOL_SIZE (msymbol); else { /* Step over other symbols at this same address, and symbols in other sections, to find the next symbol in this section with a different address. */ for (i = 1; SYMBOL_LINKAGE_NAME (msymbol + i) != NULL; i++) { if (SYMBOL_VALUE_ADDRESS (msymbol + i) != SYMBOL_VALUE_ADDRESS (msymbol) && SYMBOL_OBJ_SECTION (msymbol + i) == SYMBOL_OBJ_SECTION (msymbol)) break; } if (SYMBOL_LINKAGE_NAME (msymbol + i) != NULL && SYMBOL_VALUE_ADDRESS (msymbol + i) < obj_section_endaddr (section)) cache_pc_function_high = SYMBOL_VALUE_ADDRESS (msymbol + i); else /* We got the start address from the last msymbol in the objfile. So the end address is the end of the section. 
*/ cache_pc_function_high = obj_section_endaddr (section); } return_cached_value: if (address) { if (pc_in_unmapped_range (pc, section)) *address = overlay_unmapped_address (cache_pc_function_low, section); else *address = cache_pc_function_low; } if (name) *name = cache_pc_function_name; if (endaddr) { if (pc_in_unmapped_range (pc, section)) { /* Because the high address is actually beyond the end of the function (and therefore possibly beyond the end of the overlay), we must actually convert (high - 1) and then add one to that. */ *endaddr = 1 + overlay_unmapped_address (cache_pc_function_high - 1, section); } else *endaddr = cache_pc_function_high; } if (is_gnu_ifunc_p) *is_gnu_ifunc_p = cache_pc_function_is_gnu_ifunc; return 1; }
int find_pc_partial_function (CORE_ADDR pc, const char **name, CORE_ADDR *address, CORE_ADDR *endaddr, const struct block **block) { struct obj_section *section; struct symbol *f; struct bound_minimal_symbol msymbol; struct compunit_symtab *compunit_symtab = NULL; CORE_ADDR mapped_pc; /* To ensure that the symbol returned belongs to the correct setion (and that the last [random] symbol from the previous section isn't returned) try to find the section containing PC. First try the overlay code (which by default returns NULL); and second try the normal section code (which almost always succeeds). */ section = find_pc_overlay (pc); if (section == NULL) section = find_pc_section (pc); mapped_pc = overlay_mapped_address (pc, section); if (mapped_pc >= cache_pc_function_low && mapped_pc < cache_pc_function_high && section == cache_pc_function_section) goto return_cached_value; msymbol = lookup_minimal_symbol_by_pc_section (mapped_pc, section); for (objfile *objfile : current_program_space->objfiles ()) { if (objfile->sf) { compunit_symtab = objfile->sf->qf->find_pc_sect_compunit_symtab (objfile, msymbol, mapped_pc, section, 0); } if (compunit_symtab != NULL) break; } if (compunit_symtab != NULL) { /* Checking whether the msymbol has a larger value is for the "pathological" case mentioned in stack.c:find_frame_funname. We use BLOCK_ENTRY_PC instead of BLOCK_START_PC for this comparison because the minimal symbol should refer to the function's entry pc which is not necessarily the lowest address of the function. This will happen when the function has more than one range and the entry pc is not within the lowest range of addresses. */ f = find_pc_sect_function (mapped_pc, section); if (f != NULL && (msymbol.minsym == NULL || (BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (f)) >= BMSYMBOL_VALUE_ADDRESS (msymbol)))) { const struct block *b = SYMBOL_BLOCK_VALUE (f); cache_pc_function_name = SYMBOL_LINKAGE_NAME (f); cache_pc_function_section = section; cache_pc_function_block = b; /* For blocks occupying contiguous addresses (i.e. no gaps), the low and high cache addresses are simply the start and end of the block. For blocks with non-contiguous ranges, we have to search for the range containing mapped_pc and then use the start and end of that range. This causes the returned *ADDRESS and *ENDADDR values to be limited to the range in which mapped_pc is found. See comment preceding declaration of find_pc_partial_function in symtab.h for more information. */ if (BLOCK_CONTIGUOUS_P (b)) { cache_pc_function_low = BLOCK_START (b); cache_pc_function_high = BLOCK_END (b); } else { int i; for (i = 0; i < BLOCK_NRANGES (b); i++) { if (BLOCK_RANGE_START (b, i) <= mapped_pc && mapped_pc < BLOCK_RANGE_END (b, i)) { cache_pc_function_low = BLOCK_RANGE_START (b, i); cache_pc_function_high = BLOCK_RANGE_END (b, i); break; } } /* Above loop should exit via the break. */ gdb_assert (i < BLOCK_NRANGES (b)); } goto return_cached_value; } } /* Not in the normal symbol tables, see if the pc is in a known section. If it's not, then give up. This ensures that anything beyond the end of the text seg doesn't appear to be part of the last function in the text segment. */ if (!section) msymbol.minsym = NULL; /* Must be in the minimal symbol table. */ if (msymbol.minsym == NULL) { /* No available symbol. 
*/ if (name != NULL) *name = 0; if (address != NULL) *address = 0; if (endaddr != NULL) *endaddr = 0; return 0; } cache_pc_function_low = BMSYMBOL_VALUE_ADDRESS (msymbol); cache_pc_function_name = MSYMBOL_LINKAGE_NAME (msymbol.minsym); cache_pc_function_section = section; cache_pc_function_high = minimal_symbol_upper_bound (msymbol); cache_pc_function_block = nullptr; return_cached_value: if (address) { if (pc_in_unmapped_range (pc, section)) *address = overlay_unmapped_address (cache_pc_function_low, section); else *address = cache_pc_function_low; } if (name) *name = cache_pc_function_name; if (endaddr) { if (pc_in_unmapped_range (pc, section)) { /* Because the high address is actually beyond the end of the function (and therefore possibly beyond the end of the overlay), we must actually convert (high - 1) and then add one to that. */ *endaddr = 1 + overlay_unmapped_address (cache_pc_function_high - 1, section); } else *endaddr = cache_pc_function_high; } if (block != nullptr) *block = cache_pc_function_block; return 1; }
int addr_inside_main_func (CORE_ADDR pc) { struct minimal_symbol *msymbol; if (symfile_objfile == 0) return 0; /* APPLE LOCAL begin don't recompute start/end of main */ /* If we've already found the start/end addrs of main, don't recompute them. This will probably be fixed in the FSF sources soon too, in which case this change can be dropped. jmolenda/2004-04-28 */ if (symfile_objfile->ei.main_func_lowpc != INVALID_ENTRY_LOWPC && symfile_objfile->ei.main_func_highpc != INVALID_ENTRY_LOWPC) return (symfile_objfile->ei.main_func_lowpc <= pc && symfile_objfile->ei.main_func_highpc > pc); /* APPLE LOCAL end don't recompute start/end of main */ /* APPLE LOCAL begin don't restrict lookup_minimal_symbol's object file */ /* Don't restrict lookup_minimal_symbol's object file to symfile_objfile -- this will fail for ZeroLink apps where symfile_objfile is just the ZL launcher stub. */ msymbol = lookup_minimal_symbol (main_name (), NULL, NULL); /* APPLE LOCAL end don't restrict lookup_minimal_symbol's object file */ /* If the address range hasn't been set up at symbol reading time, set it up now. */ if (msymbol != NULL && symfile_objfile->ei.main_func_lowpc == INVALID_ENTRY_LOWPC && symfile_objfile->ei.main_func_highpc == INVALID_ENTRY_HIGHPC) { /* brobecker/2003-10-10: We used to rely on lookup_symbol() to search the symbol associated to the "main" function. Unfortunately, lookup_symbol() uses the current-language la_lookup_symbol_nonlocal function to do the global symbol search. Depending on the language, this can introduce certain side-effects, because certain languages, for instance Ada, may find more than one match. Therefore we prefer to search the "main" function symbol using its address rather than its name. */ struct symbol *mainsym = find_pc_function (SYMBOL_VALUE_ADDRESS (msymbol)); if (mainsym && SYMBOL_CLASS (mainsym) == LOC_BLOCK) { /* APPLE LOCAL begin address ranges */ struct block *bl = SYMBOL_BLOCK_VALUE (mainsym); if (BLOCK_RANGES (bl)) { symfile_objfile->ei.main_func_lowpc = BLOCK_LOWEST_PC (bl); symfile_objfile->ei.main_func_highpc = BLOCK_HIGHEST_PC (bl); } else { symfile_objfile->ei.main_func_lowpc = BLOCK_START (SYMBOL_BLOCK_VALUE (mainsym)); symfile_objfile->ei.main_func_highpc = BLOCK_END (SYMBOL_BLOCK_VALUE (mainsym)); } /* APPLE LOCAL end address ranges */ } } /* Not in the normal symbol tables, see if "main" is in the partial symbol table. If it's not, then give up. */ if (msymbol != NULL && MSYMBOL_TYPE (msymbol) == mst_text) { CORE_ADDR maddr = SYMBOL_VALUE_ADDRESS (msymbol); asection *msect = SYMBOL_BFD_SECTION (msymbol); struct obj_section *osect = find_pc_sect_section (maddr, msect); if (osect != NULL) { int i; /* Step over other symbols at this same address, and symbols in other sections, to find the next symbol in this section with a different address. */ for (i = 1; SYMBOL_LINKAGE_NAME (msymbol + i) != NULL; i++) { if (SYMBOL_VALUE_ADDRESS (msymbol + i) != maddr && SYMBOL_BFD_SECTION (msymbol + i) == msect) break; } symfile_objfile->ei.main_func_lowpc = maddr; /* Use the lesser of the next minimal symbol in the same section, or the end of the section, as the end of the function. */ if (SYMBOL_LINKAGE_NAME (msymbol + i) != NULL && SYMBOL_VALUE_ADDRESS (msymbol + i) < osect->endaddr) symfile_objfile->ei.main_func_highpc = SYMBOL_VALUE_ADDRESS (msymbol + i); else /* We got the start address from the last msymbol in the objfile. So the end address is the end of the section. 
*/ symfile_objfile->ei.main_func_highpc = osect->endaddr; } } return (symfile_objfile->ei.main_func_lowpc <= pc && symfile_objfile->ei.main_func_highpc > pc); }
static int find_pc_partial_function_impl (CORE_ADDR pc, char **name, CORE_ADDR *address, CORE_ADDR *endaddr, int inlining_flag) { struct bfd_section *section; struct partial_symtab *pst; struct symbol *f; struct minimal_symbol *msymbol; struct partial_symbol *psb; struct obj_section *osect; int i; CORE_ADDR mapped_pc; /* To ensure that the symbol returned belongs to the correct setion (and that the last [random] symbol from the previous section isn't returned) try to find the section containing PC. First try the overlay code (which by default returns NULL); and second try the normal section code (which almost always succeeds). */ section = find_pc_overlay (pc); if (section == NULL) { struct obj_section *obj_section = find_pc_section (pc); if (obj_section == NULL) section = NULL; else section = obj_section->the_bfd_section; } mapped_pc = overlay_mapped_address (pc, section); if (mapped_pc >= cache_pc_function_low && mapped_pc < cache_pc_function_high && section == cache_pc_function_section && inlining_flag == cache_pc_function_inlining) goto return_cached_value; cache_pc_function_inlining = inlining_flag; msymbol = lookup_minimal_symbol_by_pc_section (mapped_pc, section); pst = find_pc_sect_psymtab (mapped_pc, section); if (pst) { /* Need to read the symbols to get a good value for the end address. */ if (endaddr != NULL && !pst->readin) { /* Need to get the terminal in case symbol-reading produces output. */ target_terminal_ours_for_output (); PSYMTAB_TO_SYMTAB (pst); } if (pst->readin) { /* Checking whether the msymbol has a larger value is for the "pathological" case mentioned in print_frame_info. */ if (inlining_flag) f = find_pc_sect_function (mapped_pc, section); else f = find_pc_sect_function_no_inlined (mapped_pc, section); /* APPLE LOCAL begin address ranges */ if (f != NULL && (msymbol == NULL || (BLOCK_LOWEST_PC (SYMBOL_BLOCK_VALUE (f)) >= SYMBOL_VALUE_ADDRESS (msymbol)))) { cache_pc_function_low = BLOCK_LOWEST_PC (SYMBOL_BLOCK_VALUE (f)); if (BLOCK_RANGES (SYMBOL_BLOCK_VALUE (f))) cache_pc_function_high = BLOCK_HIGHEST_PC (SYMBOL_BLOCK_VALUE (f)); else cache_pc_function_high = BLOCK_END (SYMBOL_BLOCK_VALUE (f)); /* APPLE LOCAL end address ranges */ cache_pc_function_name = DEPRECATED_SYMBOL_NAME (f); cache_pc_function_section = section; goto return_cached_value; } } else { /* Now that static symbols go in the minimal symbol table, perhaps we could just ignore the partial symbols. But at least for now we use the partial or minimal symbol, whichever is larger. */ psb = find_pc_sect_psymbol (pst, mapped_pc, section); if (psb && (msymbol == NULL || (SYMBOL_VALUE_ADDRESS (psb) >= SYMBOL_VALUE_ADDRESS (msymbol)))) { /* This case isn't being cached currently. */ if (address) *address = SYMBOL_VALUE_ADDRESS (psb); if (name) *name = DEPRECATED_SYMBOL_NAME (psb); /* endaddr non-NULL can't happen here. */ return 1; } } } /* Not in the normal symbol tables, see if the pc is in a known section. If it's not, then give up. This ensures that anything beyond the end of the text seg doesn't appear to be part of the last function in the text segment. */ osect = find_pc_sect_section (mapped_pc, section); if (!osect) msymbol = NULL; /* Must be in the minimal symbol table. */ if (msymbol == NULL) { /* No available symbol. 
*/ if (name != NULL) *name = 0; if (address != NULL) *address = 0; if (endaddr != NULL) *endaddr = 0; return 0; } cache_pc_function_low = SYMBOL_VALUE_ADDRESS (msymbol); cache_pc_function_name = DEPRECATED_SYMBOL_NAME (msymbol); cache_pc_function_section = section; /* Use the lesser of the next minimal symbol in the same section, or the end of the section, as the end of the function. */ /* Step over other symbols at this same address, and symbols in other sections, to find the next symbol in this section with a different address. */ for (i = 1; DEPRECATED_SYMBOL_NAME (msymbol + i) != NULL; i++) { if (SYMBOL_VALUE_ADDRESS (msymbol + i) != SYMBOL_VALUE_ADDRESS (msymbol) && SYMBOL_BFD_SECTION (msymbol + i) == SYMBOL_BFD_SECTION (msymbol)) break; } if (DEPRECATED_SYMBOL_NAME (msymbol + i) != NULL && SYMBOL_VALUE_ADDRESS (msymbol + i) < osect->endaddr) cache_pc_function_high = SYMBOL_VALUE_ADDRESS (msymbol + i); else /* We got the start address from the last msymbol in the objfile. So the end address is the end of the section. */ cache_pc_function_high = osect->endaddr; return_cached_value: if (address) { if (pc_in_unmapped_range (pc, section)) *address = overlay_unmapped_address (cache_pc_function_low, section); else *address = cache_pc_function_low; } if (name) *name = cache_pc_function_name; if (endaddr) { if (pc_in_unmapped_range (pc, section)) { /* Because the high address is actually beyond the end of the function (and therefore possibly beyond the end of the overlay), we must actually convert (high - 1) and then add one to that. */ *endaddr = 1 + overlay_unmapped_address (cache_pc_function_high - 1, section); } else *endaddr = cache_pc_function_high; } return 1; }
char * TclpAlloc( unsigned int numBytes) /* Number of bytes to allocate. */ { register union overhead *overPtr; register long bucket; register unsigned amount; struct block *bigBlockPtr; if (!allocInit) { /* * We have to make the "self initializing" because Tcl_Alloc may be * used before any other part of Tcl. E.g., see main() for tclsh! */ TclInitAlloc(); } Tcl_MutexLock(allocMutexPtr); /* * First the simple case: we simple allocate big blocks directly. */ if (numBytes + OVERHEAD >= MAXMALLOC) { bigBlockPtr = (struct block *) TclpSysAlloc((unsigned) (sizeof(struct block) + OVERHEAD + numBytes), 0); if (bigBlockPtr == NULL) { Tcl_MutexUnlock(allocMutexPtr); return NULL; } bigBlockPtr->nextPtr = bigBlocks.nextPtr; bigBlocks.nextPtr = bigBlockPtr; bigBlockPtr->prevPtr = &bigBlocks; bigBlockPtr->nextPtr->prevPtr = bigBlockPtr; overPtr = (union overhead *) (bigBlockPtr + 1); overPtr->overMagic0 = overPtr->overMagic1 = MAGIC; overPtr->bucketIndex = 0xff; #ifdef MSTATS numMallocs[NBUCKETS]++; #endif #ifdef RCHECK /* * Record allocated size of block and bound space with magic numbers. */ overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); overPtr->rangeCheckMagic = RMAGIC; BLOCK_END(overPtr) = RMAGIC; #endif Tcl_MutexUnlock(allocMutexPtr); return (void *)(overPtr+1); } /* * Convert amount of memory requested into closest block size stored in * hash buckets which satisfies request. Account for space used per block * for accounting. */ amount = MINBLOCK; /* size of first bucket */ bucket = MINBLOCK >> 4; while (numBytes + OVERHEAD > amount) { amount <<= 1; if (amount == 0) { Tcl_MutexUnlock(allocMutexPtr); return NULL; } bucket++; } ASSERT(bucket < NBUCKETS); /* * If nothing in hash bucket right now, request more memory from the * system. */ if ((overPtr = nextf[bucket]) == NULL) { MoreCore(bucket); if ((overPtr = nextf[bucket]) == NULL) { Tcl_MutexUnlock(allocMutexPtr); return NULL; } } /* * Remove from linked list */ nextf[bucket] = overPtr->next; overPtr->overMagic0 = overPtr->overMagic1 = MAGIC; overPtr->bucketIndex = (unsigned char) bucket; #ifdef MSTATS numMallocs[bucket]++; #endif #ifdef RCHECK /* * Record allocated size of block and bound space with magic numbers. */ overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1); overPtr->rangeCheckMagic = RMAGIC; BLOCK_END(overPtr) = RMAGIC; #endif Tcl_MutexUnlock(allocMutexPtr); return ((char *)(overPtr + 1)); }
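/*
 * In the Tcl allocator routines above (TclpAlloc, TclpRealloc, TclpFree),
 * BLOCK_END(overPtr) is both compared against RMAGIC and assigned RMAGIC,
 * so under RCHECK it must expand to an lvalue located just past the
 * recorded block size; that trailing guard is what catches writes that run
 * off the end of an allocation.  The declarations below are a hedged
 * sketch of that idea, not a copy of tclAlloc.c: the field names mirror
 * the snippets, but the exact types, widths, and the use of a C11
 * anonymous struct are assumptions made for illustration.
 */

union overhead
{
    union overhead *next;               /* free-list link; reusing it on free
                                           is what "also clobbers overMagic" */
    struct                              /* valid only while the block is in use */
    {
        unsigned char overMagic0;       /* MAGIC while the block is allocated */
        unsigned char overMagic1;       /* second magic byte */
        unsigned char bucketIndex;      /* bucket number, or 0xff for big blocks */
        unsigned char rangeCheckMagic;  /* RMAGIC when range checking is on */
        unsigned long realBlockSize;    /* request rounded up to RSLOP */
    };
};

/* Guard byte written immediately after the caller-visible block; it is an
   lvalue, so the callers can both test it against and assign it RMAGIC.  */
#define BLOCK_END(overPtr) \
    (*((unsigned char *)((overPtr) + 1) + (overPtr)->realBlockSize))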