void *chk_realloc(void *ptr, size_t size) { struct hdr *hdr; // log_message("%s: %s\n", __FILE__, __FUNCTION__); if (!size) { chk_free(ptr); return NULL; } if (!ptr) return chk_malloc(size); hdr = meta(ptr); if (del(hdr) < 0) { intptr_t bt[MAX_BACKTRACE_DEPTH]; int depth; depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH); if (hdr->tag == BACKLOG_TAG) { log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n", user(hdr), size, hdr->size); log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", user(hdr), hdr->size); print_backtrace(hdr->bt, hdr->bt_depth); /* hdr->freed_bt_depth should be nonzero here */ log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n", user(hdr), hdr->size); print_backtrace(hdr->freed_bt, hdr->freed_bt_depth); log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n", user(hdr), hdr->size); print_backtrace(bt, depth); /* We take the memory out of the backlog and fall through so the * reallocation below succeeds. Since we didn't really free it, we * can default to this behavior. */ del_from_backlog(hdr); } else { log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n", user(hdr), size); print_backtrace(bt, depth); // just get a whole new allocation and leak the old one return dlrealloc(0, size); // return dlrealloc(user(hdr), size); // assuming it was allocated externally } } hdr = dlrealloc(hdr, sizeof(struct hdr) + size + sizeof(struct ftr)); if (hdr) { hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); add(hdr, size); return user(hdr); } return NULL; }
/// Segmentation fault signal handler. void segfaultHandler(int sigtype) { nvlog_flush(); print_backtrace(sigtype); exit(-1); }
/*
 * Fatal-signal exit path: dump backtraces and an explanatory message for
 * the user, then re-raise `sig` so the process dies with the true cause.
 */
void sigdie(int sig, const char* s)
{
    const char* divider =
        "------------------------------------------------------------------------\n";

    fprintf(stderr, "%s", divider);
    print_backtrace();

#ifndef __APPLE__
    /* See http://trac.sagemath.org/13889 for how Apple screwed this up */
    fprintf(stderr, "%s", divider);
    print_enhanced_backtrace();
#endif

    fprintf(stderr, "%s", divider);
    fprintf(stderr,
        "%s\n"
        "This probably occurred because a *compiled* component of Sage has a bug\n"
        "in it and is not properly wrapped with sig_on(), sig_off(). You might\n"
        "want to run Sage under gdb with 'sage -gdb' to debug this.\n"
        "Sage will now terminate.\n", s);
    fprintf(stderr, "%s", divider);
    fflush(stderr);

    /* Suicide with signal ``sig`` */
    kill(getpid(), sig);

    /* We should be dead! */
    exit(128 + sig);
}
/*
 * Abort the MPI job through PMI.
 *
 * Optionally prints a backtrace, forwards exit_code/error_msg to
 * PMI_Abort(), and — should PMI_Abort() unexpectedly return — prints the
 * message itself and exits the process.  Everything after exit() is
 * unreachable; the Sun Studio pragmas merely suppress the compiler's
 * "statement not reached" diagnostic around the mandatory FUNC_EXIT /
 * return boilerplate.
 */
int MPIDI_CH3_Abort(int exit_code, char *error_msg)
{
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_ABORT);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_ABORT);

    /* print backtrace */
    if (show_backtrace) print_backtrace();

    PMI_Abort(exit_code, error_msg);

    /* if abort returns for some reason, exit here */
    MPIU_Error_printf("%s", error_msg);
    fflush(stderr);
    exit(exit_code);

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma error_messages(off, E_STATEMENT_NOT_REACHED)
#endif /* defined(__SUNPRO_C) || defined(__SUNPRO_CC) */
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_ABORT);
    return MPI_ERR_INTERN;
#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma error_messages(default, E_STATEMENT_NOT_REACHED)
#endif /* defined(__SUNPRO_C) || defined(__SUNPRO_CC) */
}
static void memkey_add(void *p,size_t size){ if(!p || disable_memdbg) return; LOCK(mutex_mem); int disable_save=disable_memdbg; disable_memdbg=1;//prevent recursive calling T_MEMKEY *key=calloc_default(1,sizeof(T_MEMKEY)); key->p=p; key->size=size; key->nfunc=backtrace(key->func,DT); void **found=tfind(key, &MROOT, key_cmp); if(found){ T_MEMKEY*key1=*found; warning("memkey_add: %p already exists with size %zd. new size %zd\n", p, key1->size, size); key1->size=size; }else{ if(!tsearch(key, &MROOT, key_cmp)){ warning("memkey_add: Error inserting to tree\n"); } memcnt++; memalloc+=size; } disable_memdbg=disable_save; if(MEM_VERBOSE==1){ info("%p malloced with %zu bytes: %s\n",p, size, found?"collision":"success"); }else if(MEM_VERBOSE==2 && size>1024){ info("Alloc:%.3f MB mem used\n", (memalloc-memfree)/1024./1024.); } UNLOCK(mutex_mem); if(found) print_backtrace(); }
static void memkey_del(void*p){ if(!p || disable_memdbg) return; void **found=0; T_MEMKEY key; LOCK(mutex_mem); key.p=p; int disable_save=disable_memdbg; disable_memdbg=1; found=tfind(&key, &MROOT, key_cmp); if(found){ T_MEMKEY* key1=*found;/*the address of allocated T_MEMKEY. */ if(MEM_VERBOSE==1){ info("%p freed with %zu bytes: success\n",p, key1->size); }else if(MEM_VERBOSE==2 && key1->size>1024){ info("Free: %.3f MB mem used\n", (memalloc-memfree)/1024./1024.); } memfree+=key1->size; memcnt--; if(!tdelete(&key, &MROOT, key_cmp)){/*return parent. */ warning("memkey_del: Error deleting old record\n"); } free_default(key1); } disable_memdbg=disable_save; UNLOCK(mutex_mem); if(!found){ warning("%p not found\n", p); print_backtrace(); } }
/* Handler for SIGINT.
 *
 * Two regimes:
 *  - inside a sig_on() block (sig_on_count > 0): either just latch the
 *    interrupt flag (when SIGINT delivery is temporarily blocked) or
 *    raise KeyboardInterrupt and siglongjmp back to the outermost
 *    sig_on();
 *  - outside any sig_on() block: defer to Python via PyErr_SetInterrupt(),
 *    which raises on the next PyErr_CheckSignals() call.
 */
void sage_interrupt_handler(int sig)
{
#if ENABLE_DEBUG_INTERRUPT
    fprintf(stderr, "\n*** SIGINT *** %s sig_on\n",
            (_signals.sig_on_count > 0) ? "inside" : "outside");
    print_backtrace();
#endif
    if (_signals.sig_on_count > 0)
    {
        if (_signals.block_sigint)
        {
            /* SIGINT is blocked, so simply set _signals.interrupt_received. */
            _signals.interrupt_received = 1;
            return;
        }

        /* Raise KeyboardInterrupt */
        PyErr_SetNone(PyExc_KeyboardInterrupt);

        /* Jump back to sig_on() (the first one if there is a stack) */
        reset_CPU();
        siglongjmp(_signals.env, sig);
    }
    else
    {
        /* Set an internal Python flag that an interrupt has been
         * raised. This will not immediately raise an exception, only
         * on the next call of PyErr_CheckSignals(). We cannot simply
         * raise an exception here because of Python's "global
         * interpreter lock" -- Jeroen Demeyer */
        PyErr_SetInterrupt();
        _signals.interrupt_received = 1;
    }
}
/*
 * Emit a Python RuntimeWarning (plus a C-level backtrace) for an
 * unbalanced sig_off() call at the given source location.
 */
void _sig_off_warning(const char* file, int line)
{
    char message[320];

    snprintf(message, sizeof(message),
             "sig_off() without sig_on() at %s:%i", file, line);
    PyErr_WarnEx(PyExc_RuntimeWarning, message, 2);
    print_backtrace();
}
/*
 * Fatal-error exit: dump the stack, report `msg` with the text for the
 * given errno value `errval`, run cleanup(), and terminate.
 */
void strexit(const char* msg, int errval)
{
    print_backtrace();
    printf("Error: %s: %s\n", msg, strerror(errval));
    cleanup();
    exit(EXIT_FAILURE);
}
/*
 * Signal handler for the scheduler: stop listening and clean up.
 * A backtrace is printed for every signal except SIGTERM, which is the
 * expected shutdown request.
 */
static int scheduler_signal_handler(int sig)
{
    /* quit listening upon signal and do clean up. */
    psignal(sig, "scheduler");
    if (sig != SIGTERM) /* was the magic number 15 */
        print_backtrace();
    quit_listen = 1;
    return 1;
}
/*
 * Report a fatal error at the given source location, dump the call
 * stack, and terminate the process with a nonzero status.
 */
void simple_error(const char *file, int line_num)
{
    fprintf(stderr, "FATAL ERROR at %s:%d\n", file, line_num);
    print_backtrace();
    exit(1);
}
// Release a block previously returned by page_aligned_allocator::malloc().
//
// In TORRENT_DEBUG_BUFFERS builds the allocation is bracketed by two
// guard pages: re-enable access to them, validate the header magic,
// clear it (so a double free trips the assert), and free the true start
// of the allocation (one page before `block`).  Otherwise dispatch to
// the platform's aligned-free counterpart.
void page_aligned_allocator::free(char* const block)
{
#ifdef TORRENT_DEBUG_BUFFERS
	int page = page_size();
	// make the two surrounding guard pages accessible again so the
	// header can be read and the whole region freed
	mprotect(block - page, page, PROT_READ | PROT_WRITE);
	alloc_header* h = (alloc_header*)(block - page);
	// total page count = payload pages + 2 guard pages
	int num_pages = (h->size + (page-1)) / page + 2;
	TORRENT_ASSERT(h->magic == 0x1337);
	mprotect(block + (num_pages-2) * page, page, PROT_READ | PROT_WRITE);
//	fprintf(stderr, "free: %p head: %p tail: %p size: %d\n", block, block - page, block + h->size, int(h->size));
	h->magic = 0;

#if defined __linux__ || (defined __APPLE__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050)
	// NOTE(review): presumably reports the allocation-time stack stored
	// in the header -- confirm against print_backtrace's signature.
	print_backtrace(h->stack, sizeof(h->stack));
#endif
	::free(block - page);
	return;
#endif

#ifdef TORRENT_WINDOWS
	_aligned_free(block);
#elif defined TORRENT_BEOS
	area_id id = area_for(block);
	if (id < B_OK) return;
	delete_area(id);
#else
	::free(block);
#endif
}
// Print the exception's message and the name of its class to stdout,
// followed by the exception's backtrace.
void RubyException::show(STATE) {
    std::cout << exception->message_c_str(state)
              << " ("
              << exception->class_object(state)->debug_str(state)
              << ") \n";
    print_backtrace();
}
// Crash/hang signal handler.
//
// Reconstructs the faulting CPU state from the signal context, optionally
// prints a backtrace, and -- if recovery is enabled -- patches the saved
// context so that the interrupted thread resumes in recover() instead of
// crashing.
void signalHandler(int type, siginfo_t * si, void* ccontext)
{
	lastErrorWasAssert = 0;
	lastErrorWasLoop = 0;
	SimulatedCPU cpu;
	if (ccontext) {
		cpu.set_all(ccontext);
		if (cpu.frame == 0) cpu.frame = cpu.stack;
		char *addr = (char*)(si->si_addr);
		char * minstack = cpu.stack < cpu.frame ? cpu.stack : cpu.frame;
		// A fault within ~1KB below the lower of stack/frame pointer is
		// reclassified as a stack-overflow segfault.
		if (type == SIGSEGV && ptrdistance(addr, minstack) < 1024)
			type = SIGMYSTACKSEGV;
	}

	if (crashHandlerType & CRASH_HANDLER_PRINT_BACKTRACE || !ccontext) {
		print_backtrace(signalIdToName(type));
		if (!ccontext) return; // nothing to recover without a context
	}

	if (crashHandlerType & CRASH_HANDLER_RECOVER) {
		if (type == SIGMYHANG) {
			if (!isAddressInTeXstudio(cpu.pc)) return; //don't mess with library functions
			lastLoopContext = *(static_cast<CPU_CONTEXT_TYPE*>(ccontext));
			lastErrorWasLoop = 1;
		} else if (type == SIGMYSTACKSEGV)
			cpu.unwindStack();
		lastCrashSignal = type;
		// Redirect execution into recover() by rewriting the saved
		// context before the kernel restores it.
		cpu.call((char*)(&recover));
		cpu.get_all(ccontext);
	}
}
/*
 * Signal handler for the scheduler: stop listening and clean up.
 * Prints a backtrace for anything other than SIGTERM (normal shutdown).
 */
static int scheduler_signal_handler(int sig)
{
    /* quit listening upon signal and do clean up. */
    /* BUGFIX: sys_siglist is deprecated (removed from glibc 2.32's API)
     * and was indexed without a bounds check; strsignal() is the
     * portable, checked replacement. */
    info("scheduler: %s", strsignal(sig));
    if (sig != SIGTERM) /* was the magic number 15 */
        print_backtrace();
    quit_listen = 1;
    return 1;
}
// Report an internal invariant violation with its context string,
// dump the call stack, and abort so a core dump is produced.
void bug(const char* message, const char* arg)
{
    std::cerr << "[BUG: " << message << ": " << arg << "]\n";
    print_backtrace();
    ::abort();
}
void* operator new (std::size_t len) throw(std::bad_alloc) { void* data = nullptr; if (enable_buffer_protection) { // Allocate requested memory + enough to fit checksum at start and end data = malloc(len + sizeof(buffer_protection_checksum) * 2); // Write checksums auto* temp = reinterpret_cast<char*>(data); memcpy(temp, &buffer_protection_checksum, sizeof(buffer_protection_checksum)); memcpy(temp + sizeof(buffer_protection_checksum) + len, &buffer_protection_checksum, sizeof(buffer_protection_checksum)); } else { data = malloc(len); } if (enable_debugging_verbose) { DPRINTF("malloc(%llu bytes) == %p\n", (unsigned long long) len, data); safe_print_symbol(1, __builtin_return_address(0)); safe_print_symbol(2, __builtin_return_address(1)); } if (UNLIKELY(!data)) { print_backtrace(); DPRINTF("malloc(%llu bytes): FAILED\n", (unsigned long long) len); throw std::bad_alloc(); } if (enable_debugging) { if (!free_allocs.empty()) { auto* x = free_allocs.pop(); new(x) allocation((char*) data, len, __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2)); } else if (!allocs.free_capacity()) { DPRINTF("[WARNING] Internal fixed vectors are FULL, expect bogus double free messages\n"); } else { allocs.emplace((char*) data, len, __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2)); } } if (enable_buffer_protection) { // We need to return a pointer to the allocated memory + 4 // e.g. after our first checksum return reinterpret_cast<void*>(reinterpret_cast<char*>(data) + sizeof(buffer_protection_checksum)); } else { return data; } }
/*
 * Generic fatal-exception handler: dump registers and a stack backtrace
 * (gpr[1] holds the stack pointer on PowerPC), then panic.
 */
void _exception(int signr, struct pt_regs *regs)
{
	unsigned long *sp = (unsigned long *)regs->gpr[1];

	show_regs(regs);
	print_backtrace(sp);
	panic("Exception");
}
/*
 * Append `len` bytes of `source`, starting at offset `start`, to `target`.
 * len == -1 means "everything from start to the end of source".
 * Returns 1 on success, 0 on failure (bad range or grow failure).
 */
int dxcat(dbuf_t * target, dbuf_t * source, int start, int len)
{
  dchecksig(target);
  dchecksig(source);
  if(len == -1) {
    len = source->dsize - start;
  }
  if(len < 0) {
    debug(DBG_GLOBAL, 0, "negative length to dxcat!");
    print_backtrace();
    /* BUGFIX: do not fall through with a negative length — memcpy would
     * convert it to a huge size_t and overrun both buffers. */
    return 0;
  }
  if(start + len > source->dsize) {
    /* BUGFIX: the original format string contained a bare '%' and had
     * conversion specifiers with no matching arguments (undefined
     * behavior in a printf-style function). */
    debug(DBG_MEMORY, 10,
          "dxcat: Trying to copy %d bytes from %d offset, but the total len is %d",
          len, start, source->dsize);
    return 0;
  }
  debug(DBG_MEMORY, 10, "dxcat of %d bytes, taken from pos %d\n", len, start);
  if(dgrow(target, len)) {
    memcpy(&target->buf[target->dsize], &(source->buf[start]), len);
    debug(DBG_MEMORY, 10, "dsize before grow: %d, delta: %d", target->dsize, source->dsize);
    target->dsize = target->dsize + len;
    debug(DBG_MEMORY, 10, "new dsize after grow: %d", target->dsize);
    return 1;
  } else {
    return 0;
  }
}
/*
 * Fatal-error exit using the current errno: dump the stack, report the
 * error text, run cleanup(), and terminate.
 */
void strexit()
{
    /* BUGFIX: capture errno before print_backtrace(), which may perform
     * library calls that overwrite it. */
    int saved_errno = errno;

    print_backtrace();
    printf("Error: %s\n", strerror(saved_errno));
    cleanup();
    exit(EXIT_FAILURE);
}
/*
 * Fatal-signal handler: report which signal arrived, dump the call
 * stack, and terminate with a nonzero status.
 */
static void signal_handler(int sig)
{
    fprintf(stderr, "Signal %d:\n", sig);
    print_backtrace();
    exit(1);
}
/*
 * PowerPC machine-check exception handler.
 *
 * If the faulting instruction address (regs->nip) has an entry in the
 * exception table, patch nip to the fixup address, clear MCSR and
 * resume.  Otherwise report whether the check was raised by an
 * instruction or data access (from ESR), dump registers and a
 * backtrace, and panic.
 */
void MachineCheckException(struct pt_regs *regs)
{
	unsigned long fixup, val;

	if ((fixup = search_exception_table(regs->nip)) != 0) {
		regs->nip = fixup;
		/* NOTE(review): the read uses mfspr(MCSR) while the write-back
		 * uses mtspr(SPRN_MCSR, ...) — presumably both macros name the
		 * same SPR; verify against the SPR macro definitions. */
		val = mfspr(MCSR);
		/* Clear MCSR */
		mtspr(SPRN_MCSR, val);
		return;
	}

	rt_kprintf("Machine Check Exception.\n");
	rt_kprintf("Caused by (from msr): ");
	rt_kprintf("regs %p ", regs);
	val = get_esr();
	if (val & ESR_IMCP) {
		rt_kprintf("Instruction");
		/* Acknowledge the instruction machine-check bit in ESR. */
		mtspr(ESR, val & ~ESR_IMCP);
	} else {
		rt_kprintf("Data");
	}
	rt_kprintf(" machine check.\n");

	show_regs(regs);
	/* gpr[1] is the stack pointer on PowerPC. */
	print_backtrace((unsigned long *)regs->gpr[1]);
	panic("machine check");
}
static void heaptracker_free_leaked_memory(void) { struct hdr *del; int cnt; if (num) log_message("+++ THERE ARE %d LEAKED ALLOCATIONS\n", num); while (head) { int safe; del = head; log_message("+++ DELETING %d BYTES OF LEAKED MEMORY AT %p (%d REMAINING)\n", del->size, user(del), num); if (del_leak(del, &safe)) { /* safe == 1, because the allocation is valid */ log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", user(del), del->size); print_backtrace(del->bt, del->bt_depth); } dlfree(del); } // log_message("+++ DELETING %d BACKLOGGED ALLOCATIONS\n", backlog_num); while (backlog_head) { del = backlog_tail; del_from_backlog(del); dlfree(del); } }
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 *
 * The 1->0 transition is interlocked with the PFS inum lookup tree via
 * pmp->inum_spin so nobody can find the inode while it is being torn
 * down; atomic_cmpset_int() guards against a concurrent ref gained
 * between the refs snapshot and the decrement.
 */
void hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		/* Snapshot refs; the cmpset below detects any race. */
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);
			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						&pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);
				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				/* Lost the race; retry from the top. */
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
/*
 * Fatal-signal handler: log the aborting signal together with the
 * program path, dump a backtrace, and terminate unsuccessfully.
 */
static void signal_handler(int signo)
{
    connman_error("Aborting (signal %d) [%s]", signo, program_exec);
    print_backtrace(2);
    exit(EXIT_FAILURE);
}
/*
 * SIGABRT handler: restore the terminal state, announce the signal on
 * stdout, print a backtrace, and exit.
 */
void sig_abrt_handler (int signum __attribute__ ((unused)))
{
  static const char msg[] = "SIGABRT received\n";

  set_terminal_attributes ();
  /* BUGFIX: the original wrote 18 bytes, but the message is only 17
   * bytes long — the trailing NUL terminator leaked onto fd 1. */
  if (write (1, msg, sizeof msg - 1) < 0)
    {
      // Sad thing
    }
  print_backtrace ();
  exit (EXIT_FAILURE);
}
/*
 * Called when an assertion fails: report the condition and its source
 * location, dump the call stack, and abort (producing a core dump).
 */
void assert_fail(const char *cond, const char *file, int line_num)
{
    fprintf(stderr, "ASSERT FAILED: \"%s\", at %s:%d\n", cond, file, line_num);
    print_backtrace();
    abort();
}
/*
 * SIGSEGV handler: restore the terminal state, announce the signal on
 * stdout, print a backtrace, and request shutdown.
 */
void sig_segv_handler (int signum __attribute__ ((unused)))
{
  static const char msg[] = "SIGSEGV received\n";

  set_terminal_attributes ();
  /* BUGFIX: the original wrote 18 bytes, but the message is only 17
   * bytes long — the trailing NUL terminator leaked onto fd 1. */
  if (write (1, msg, sizeof msg - 1) < 0)
    {
      // Sad thing
    }
  print_backtrace ();
  qthreadExitRequest (EXIT_FAILURE);
}
/*
 * Start or restart a timeout. Installs the callout structure on the
 * callwheel. Callers may legally pass any value, even if 0 or negative,
 * but since the sc->curticks index may have already been processed a
 * minimum timeout of 1 tick will be enforced.
 *
 * This function will block if the callout is currently queued to a different
 * cpu or the callback is currently running in another thread.
 */
void
callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
{
	softclock_pcpu_t sc;
	globaldata_t gd;

#ifdef INVARIANTS
	/* Catch callers that forgot callout_init(); repair and warn with a
	 * backtrace rather than crashing. */
        if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_reset(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	gd = mycpu;
	sc = &softclock_pcpu_ary[gd->gd_cpuid];
	crit_enter_gd(gd);

	/*
	 * Our cpu must gain ownership of the callout and cancel anything
	 * still running, which is complex.  The easiest way to do it is to
	 * issue a callout_stop().
	 *
	 * Clearing bits on flags is a way to guarantee they are not set,
	 * as the cmpset atomic op will fail otherwise.  PENDING and ARMED
	 * must not be set, if we find them set we loop up and call
	 * stop_sync() again.
	 *
	 */
	for (;;) {
		int flags;
		int nflags;

		callout_stop_sync(c);
		flags = c->c_flags & ~(CALLOUT_PENDING | CALLOUT_ARMED);
		nflags = (flags & ~(CALLOUT_CPU_MASK |
				    CALLOUT_EXECUTED)) |
			 CALLOUT_CPU_TO_FLAGS(gd->gd_cpuid) |
			 CALLOUT_ARMED |
			 CALLOUT_PENDING |
			 CALLOUT_ACTIVE;
		/* CAS claims ownership atomically; retry on interference. */
		if (atomic_cmpset_int(&c->c_flags, flags, nflags))
			break;
	}

	/* Enforce the 1-tick minimum documented above. */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_func = ftn;
	c->c_time = sc->curticks + to_ticks;

	/* Hash onto the callwheel spoke for the expiry tick. */
	TAILQ_INSERT_TAIL(&sc->callwheel[c->c_time & cwheelmask],
			  c, c_links.tqe);
	crit_exit_gd(gd);
}
// Allocate `bytes` of page-aligned memory.
//
// In TORRENT_DEBUG_BUFFERS builds the payload is bracketed with one
// read-only guard page on each side; the leading page also holds a
// header with the requested size, a magic value and the allocation
// backtrace, and the caller receives a pointer one page into the block.
// Returns NULL on allocation failure.
char* page_aligned_allocator::malloc(page_aligned_allocator::size_type bytes)
{
	TORRENT_ASSERT(bytes > 0);
	// just sanity check (this needs to be pretty high
	// for cases where the cache size is several gigabytes)
	TORRENT_ASSERT(bytes < 0x30000000);

	TORRENT_ASSERT(int(bytes) >= page_size());
#ifdef TORRENT_DEBUG_BUFFERS
	const int page = page_size();
	// payload pages + 2 guard pages
	const int num_pages = (bytes + (page-1)) / page + 2;
	const int orig_bytes = bytes;
	bytes = num_pages * page;
#endif

	char* ret;
#if TORRENT_USE_POSIX_MEMALIGN
	if (posix_memalign(reinterpret_cast<void**>(&ret), page_size(), bytes)
		!= 0) ret = NULL;
#elif TORRENT_USE_MEMALIGN
	ret = static_cast<char*>(memalign(page_size(), bytes));
#elif defined TORRENT_WINDOWS
	ret = static_cast<char*>(_aligned_malloc(bytes, page_size()));
#elif defined TORRENT_BEOS
	// BUGFIX: round the size UP to a whole number of pages.  The
	// original mask `& (page_size()-1)` kept only the low-order bits,
	// yielding the remainder rather than the rounded size.
	area_id id = create_area("", &ret, B_ANY_ADDRESS
		, (bytes + page_size() - 1) & ~(page_size()-1), B_NO_LOCK
		, B_READ_AREA | B_WRITE_AREA);
	if (id < B_OK) return NULL;
	ret = static_cast<char*>(ret);
#else
	ret = static_cast<char*>(valloc(size_t(bytes)));
#endif
	if (ret == NULL) return NULL;

#ifdef TORRENT_DEBUG_BUFFERS
	// record the allocation in the header page, then make both guard
	// pages read-only so overruns fault immediately
	alloc_header* h = (alloc_header*)ret;
	h->size = orig_bytes;
	h->magic = 0x1337;
	print_backtrace(h->stack, sizeof(h->stack));

#ifdef TORRENT_WINDOWS
#define mprotect(buf, size, prot) VirtualProtect(buf, size, prot, NULL)
#define PROT_READ PAGE_READONLY
#endif
	mprotect(ret, page, PROT_READ);
	mprotect(ret + (num_pages-1) * page, page, PROT_READ);
#ifdef TORRENT_WINDOWS
#undef mprotect
#undef PROT_READ
#endif

//	fprintf(stderr, "malloc: %p head: %p tail: %p size: %d\n", ret + page, ret, ret + page + bytes, int(bytes));

	return ret + page;
#endif // TORRENT_DEBUG_BUFFERS

	return ret;
}