/*
 * Log the kill of one monitored daemon and forget its cached pid.
 *
 * @hook:   name of the LSM hook doing the logging (for the debug banner)
 * @p:      task receiving the signal
 * @info:   siginfo of the signal (may come from kernel or user space)
 * @sig:    signal number
 * @secid:  security id supplied by the LSM framework
 * @tcomm:  command name of the target task
 * @tcomm2: command name of the sending (current) task
 * @tag:    short tag printed in the "[RTFCTL] ... killed" message
 */
static void rtfctl_log_watched_kill(const char *hook,
				    struct task_struct *p,
				    struct siginfo *info, int sig, u32 secid,
				    const char *tcomm, const char *tcomm2,
				    const char *tag)
{
	RTFCTL_MSG("########## %s ##########\n", hook);
	RTFCTL_MSG("pid: %d (%s) kill pid: %d (%s)\n",
		   task_tgid_vnr(current), tcomm2, task_tgid_vnr(p), tcomm);
	/*
	 * Print the siginfo pointer with %p: the old (unsigned int) cast
	 * truncated it on 64-bit kernels.
	 */
	RTFCTL_MSG("info: %p, isFromKernel: %d\n", info, SI_FROMKERNEL(info));
	RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid);
	printk("[RTFCTL] %s killed\n", tag);
}

/*
 * LSM task_kill hook: when one of the monitored processes (zygote,
 * felica_daemon, felica_agent, installd) is signalled and its tgid
 * matches the cached pid, log the event and reset the cached pid to -1
 * so the process can be re-registered when it restarts.
 *
 * Always returns 0 — the hook observes but never denies the signal.
 */
int rootfctrl_task_kill(struct task_struct *p, struct siginfo *info,
			int sig, u32 secid)
{
	char tcomm[sizeof(p->comm)];
	char tcomm2[sizeof(current->comm)];

	get_task_comm(tcomm2, current);
	get_task_comm(tcomm, p);

	if (!strcmp("zygote", tcomm) && (task_tgid_vnr(p) == zygote_pid)) {
		rtfctl_log_watched_kill(__FUNCTION__, p, info, sig, secid,
					tcomm, tcomm2, "zg");
		zygote_pid = -1;
	}
	if (!strcmp("felica_daemon", tcomm) &&
	    (task_tgid_vnr(p) == flc_daemon_pid)) {
		rtfctl_log_watched_kill(__FUNCTION__, p, info, sig, secid,
					tcomm, tcomm2, "fdaemon");
		flc_daemon_pid = -1;
	}
	if (!strcmp("felica_agent", tcomm) &&
	    (task_tgid_vnr(p) == flc_agent_pid)) {
		rtfctl_log_watched_kill(__FUNCTION__, p, info, sig, secid,
					tcomm, tcomm2, "fagent");
		flc_agent_pid = -1;
	}
	if (!strcmp("installd", tcomm) && (task_tgid_vnr(p) == installd_pid)) {
		rtfctl_log_watched_kill(__FUNCTION__, p, info, sig, secid,
					tcomm, tcomm2, "isd");
		installd_pid = -1;
	}

	return 0;
}
/*
 * Signal handler that initiates (or reports on) process shutdown.
 *
 * Logs the incoming signal, marks the global shutdown flag, and — when
 * invoked on a thread other than the main one — forwards the signal to
 * the main thread so that shutdown proceeds from a single place.
 */
void exit_handler(int signo, siginfo_t * info, void *context)
{
	(void) context;

	/* Trace the signal; sender details are only present when info != NULL. */
	if (info == NULL) {
		LEVEL_DEBUG("Signal=%d, Threads: this=%lu, main=%lu", signo, pthread_self(), main_threadid);
	} else {
		LEVEL_DEBUG
		    ("Signal=%d, errno %d, code %d, pid=%ld, Threads: this=%lu main=%lu",
		     signo, info->si_errno, info->si_code, (long int) info->si_pid,
		     pthread_self(), main_threadid);
	}

	/* A second signal during shutdown is only reported, not acted on. */
	if (StateInfo.shutting_down) {
		LEVEL_DEBUG("exit_handler: shutdown already in progress. signo=%d, self=%lu, main=%lu", signo, pthread_self(), main_threadid);
		return;
	}

	StateInfo.shutting_down = 1;

	if (info != NULL) {
		if (SI_FROMUSER(info)) {
			LEVEL_DEBUG("Kill signal from user");
		}
		if (SI_FROMKERNEL(info)) {
			LEVEL_DEBUG("Kill signal from system");
		}
	}

	if (IS_MAINTHREAD) {
		LEVEL_DEBUG("Ignore kill from this thread. main=%lu this=%lu Signal=%d", main_threadid, pthread_self(), signo);
	} else {
		/* Not the main thread: hand the signal over to it. */
		LEVEL_DEBUG("Kill from main thread: %lu this=%lu Signal=%d", main_threadid, pthread_self(), signo);
		pthread_kill(main_threadid, signo);
	}
}
/*
 * SunOS 4.x-compatible signal trampoline (x86/amd64 variant).
 *
 * Builds an old-style struct sigcontext from the ucontext_t delivered by
 * the kernel, translates new siginfo codes into the old 4.x fault codes,
 * invokes the user's 4.x-style handler, then copies any modifications the
 * handler made back into the ucontext and resumes via setcontext().
 *
 * Change from the original: the unused locals (i, j, gwinswitch) — left
 * over from the SPARC register-window variant — have been removed.
 */
static void
ucbsigvechandler(int sig, siginfo_t *sip, ucontext_t *ucp)
{
	struct sigcontext sc;
	int code;
	char *addr;

	sc.sc_onstack = ((ucp->uc_stack.ss_flags & SS_ONSTACK) != 0);
	sc.sc_mask = set2mask(&ucp->uc_sigmask);

	/* Snapshot the machine registers into the old-style context. */
#if defined(__amd64)
	sc.sc_sp = (long)ucp->uc_mcontext.gregs[REG_RSP];
	sc.sc_pc = (long)ucp->uc_mcontext.gregs[REG_RIP];
	sc.sc_ps = (long)ucp->uc_mcontext.gregs[REG_RFL];
	sc.sc_r0 = (long)ucp->uc_mcontext.gregs[REG_RAX];
	sc.sc_r1 = (long)ucp->uc_mcontext.gregs[REG_RDX];
#else
	sc.sc_sp = (int)ucp->uc_mcontext.gregs[UESP];
	sc.sc_pc = (int)ucp->uc_mcontext.gregs[EIP];
	sc.sc_ps = (int)ucp->uc_mcontext.gregs[EFL];
	sc.sc_r0 = (int)ucp->uc_mcontext.gregs[EAX];
	sc.sc_r1 = (int)ucp->uc_mcontext.gregs[EDX];
#endif

	/*
	 * Translate signal codes from new to old.
	 * /usr/include/sys/siginfo.h contains new codes.
	 * /usr/ucbinclude/sys/signal.h contains old codes.
	 */
	code = 0;
	addr = SIG_NOADDR;
	if (sip != NULL && SI_FROMKERNEL(sip)) {
		addr = sip->si_addr;

		switch (sig) {
		case SIGILL:
		case SIGFPE:
			code = ILL_ILLINSTR_FAULT;
			break;

		case SIGBUS:
			switch (sip->si_code) {
			case BUS_ADRALN:
				code = BUS_ALIGN;
				break;
			case BUS_ADRERR:
				code = BUS_HWERR;
				break;
			default:	/* BUS_OBJERR */
				code = FC_MAKE_ERR(sip->si_errno);
				break;
			}
			break;

		case SIGSEGV:
			switch (sip->si_code) {
			case SEGV_MAPERR:
				code = SEGV_NOMAP;
				break;
			case SEGV_ACCERR:
				code = SEGV_PROT;
				break;
			default:
				code = FC_MAKE_ERR(sip->si_errno);
				break;
			}
			break;

		default:
			addr = SIG_NOADDR;
			break;
		}
	}

	/* Invoke the user's 4.x-style handler. */
	(*_siguhandler[sig])(sig, code, &sc, addr);

	/* Propagate handler modifications back into the ucontext. */
	if (sc.sc_onstack)
		ucp->uc_stack.ss_flags |= SS_ONSTACK;
	else
		ucp->uc_stack.ss_flags &= ~SS_ONSTACK;
	mask2set(sc.sc_mask, &ucp->uc_sigmask);

#if defined(__amd64)
	ucp->uc_mcontext.gregs[REG_RSP] = (long)sc.sc_sp;
	ucp->uc_mcontext.gregs[REG_RIP] = (long)sc.sc_pc;
	ucp->uc_mcontext.gregs[REG_RFL] = (long)sc.sc_ps;
	ucp->uc_mcontext.gregs[REG_RAX] = (long)sc.sc_r0;
	ucp->uc_mcontext.gregs[REG_RDX] = (long)sc.sc_r1;
#else
	ucp->uc_mcontext.gregs[UESP] = (int)sc.sc_sp;
	ucp->uc_mcontext.gregs[EIP] = (int)sc.sc_pc;
	ucp->uc_mcontext.gregs[EFL] = (int)sc.sc_ps;
	ucp->uc_mcontext.gregs[EAX] = (int)sc.sc_r0;
	ucp->uc_mcontext.gregs[EDX] = (int)sc.sc_r1;
#endif

	setcontext(ucp);
}
/*
 * SunOS 4.x-compatible signal trampoline (SPARC variant).
 *
 * Builds an old-style struct sigcontext from the kernel-supplied
 * ucontext_t (including the 4.x PC-increment emulation for integer
 * divide-by-zero/overflow), translates new siginfo codes to the old 4.x
 * fault codes, calls the user's 4.x-style handler, then copies handler
 * modifications back and resumes via setcontext().
 *
 * Fix: in the (disabled, #ifdef NEVER) register-window copy, the loop
 * bound compared the index against the gwins POINTER itself; it is now
 * bounded by the window count (gwins->wbcnt), matching the restore loop.
 */
static void
ucbsigvechandler(int sig, siginfo_t *sip, ucontext_t *ucp)
{
	struct sigcontext sc;
	int code;
	char *addr;
#ifdef NEVER
	int gwinswitch = 0;
#endif

	sc.sc_onstack = ((ucp->uc_stack.ss_flags & SS_ONSTACK) != 0);
	sc.sc_mask = set2mask(&ucp->uc_sigmask);

#if defined(__sparc)
	if (sig == SIGFPE && sip != NULL && SI_FROMKERNEL(sip) &&
	    (sip->si_code == FPE_INTDIV || sip->si_code == FPE_INTOVF)) {
		/*
		 * Hack to emulate the 4.x kernel behavior of incrementing
		 * the PC on integer divide by zero and integer overflow
		 * on sparc machines.  (5.x does not increment the PC.)
		 */
		ucp->uc_mcontext.gregs[REG_PC] =
		    ucp->uc_mcontext.gregs[REG_nPC];
		ucp->uc_mcontext.gregs[REG_nPC] += 4;
	}
	sc.sc_sp = ucp->uc_mcontext.gregs[REG_SP];
	sc.sc_pc = ucp->uc_mcontext.gregs[REG_PC];
	sc.sc_npc = ucp->uc_mcontext.gregs[REG_nPC];

	/* XX64 There is no REG_PSR for sparcv9, we map in REG_CCR for now */
#if defined(__sparcv9)
	sc.sc_psr = ucp->uc_mcontext.gregs[REG_CCR];
#else
	sc.sc_psr = ucp->uc_mcontext.gregs[REG_PSR];
#endif

	sc.sc_g1 = ucp->uc_mcontext.gregs[REG_G1];
	sc.sc_o0 = ucp->uc_mcontext.gregs[REG_O0];

	/*
	 * XXX - What a kludge!
	 * Store a pointer to the original ucontext_t in the sigcontext
	 * so that it's available to the sigcleanup call that needs to
	 * return from the signal handler.  Otherwise, vital information
	 * (e.g., the "out" registers) that's only saved in the
	 * ucontext_t isn't available to sigcleanup.
	 */
	sc.sc_wbcnt = (int)(sizeof (*ucp));
	sc.sc_spbuf[0] = (char *)(uintptr_t)sig;
	sc.sc_spbuf[1] = (char *)ucp;
#ifdef NEVER
	/*
	 * XXX - Sorry, we can never pass the saved register windows
	 * on in the sigcontext because we use that space to save the
	 * ucontext_t.
	 */
	if (ucp->uc_mcontext.gwins != (gwindows_t *)0) {
		int i, j;

		gwinswitch = 1;
		sc.sc_wbcnt = ucp->uc_mcontext.gwins->wbcnt;
		/* XXX - should use bcopy to move this in bulk */
		/* Bound by the window count, not the gwins pointer. */
		for (i = 0; i < ucp->uc_mcontext.gwins->wbcnt; i++) {
			sc.sc_spbuf[i] = ucp->uc_mcontext.gwins->spbuf[i];
			for (j = 0; j < 8; j++)
				sc.sc_wbuf[i][j] =
				    ucp->uc_mcontext.gwins->wbuf[i].rw_local[j];
			for (j = 0; j < 8; j++)
				sc.sc_wbuf[i][j+8] =
				    ucp->uc_mcontext.gwins->wbuf[i].rw_in[j];
		}
	}
#endif
#endif

	/*
	 * Translate signal codes from new to old.
	 * /usr/include/sys/siginfo.h contains new codes.
	 * /usr/ucbinclude/sys/signal.h contains old codes.
	 */
	code = 0;
	addr = SIG_NOADDR;
	if (sip != NULL && SI_FROMKERNEL(sip)) {
		addr = sip->si_addr;

		switch (sig) {
		case SIGILL:
			switch (sip->si_code) {
			case ILL_PRVOPC:
				code = ILL_PRIVINSTR_FAULT;
				break;
			case ILL_BADSTK:
				code = ILL_STACK;
				break;
			case ILL_ILLTRP:
				code = ILL_TRAP_FAULT(sip->si_trapno);
				break;
			default:
				code = ILL_ILLINSTR_FAULT;
				break;
			}
			break;

		case SIGEMT:
			code = EMT_TAG;
			break;

		case SIGFPE:
			switch (sip->si_code) {
			case FPE_INTDIV:
				code = FPE_INTDIV_TRAP;
				break;
			case FPE_INTOVF:
				code = FPE_INTOVF_TRAP;
				break;
			case FPE_FLTDIV:
				code = FPE_FLTDIV_TRAP;
				break;
			case FPE_FLTOVF:
				code = FPE_FLTOVF_TRAP;
				break;
			case FPE_FLTUND:
				code = FPE_FLTUND_TRAP;
				break;
			case FPE_FLTRES:
				code = FPE_FLTINEX_TRAP;
				break;
			default:
				code = FPE_FLTOPERR_TRAP;
				break;
			}
			break;

		case SIGBUS:
			switch (sip->si_code) {
			case BUS_ADRALN:
				code = BUS_ALIGN;
				break;
			case BUS_ADRERR:
				code = BUS_HWERR;
				break;
			default:	/* BUS_OBJERR */
				code = FC_MAKE_ERR(sip->si_errno);
				break;
			}
			break;

		case SIGSEGV:
			switch (sip->si_code) {
			case SEGV_MAPERR:
				code = SEGV_NOMAP;
				break;
			case SEGV_ACCERR:
				code = SEGV_PROT;
				break;
			default:
				code = FC_MAKE_ERR(sip->si_errno);
				break;
			}
			break;

		default:
			addr = SIG_NOADDR;
			break;
		}
	}

	/* Invoke the user's 4.x-style handler. */
	(*_siguhandler[sig])(sig, code, &sc, addr);

	/* Propagate handler modifications back into the ucontext. */
	if (sc.sc_onstack)
		ucp->uc_stack.ss_flags |= SS_ONSTACK;
	else
		ucp->uc_stack.ss_flags &= ~SS_ONSTACK;
	mask2set(sc.sc_mask, &ucp->uc_sigmask);

#if defined(__sparc)
	ucp->uc_mcontext.gregs[REG_SP] = sc.sc_sp;
	ucp->uc_mcontext.gregs[REG_PC] = sc.sc_pc;
	ucp->uc_mcontext.gregs[REG_nPC] = sc.sc_npc;
#if defined(__sparcv9)
	ucp->uc_mcontext.gregs[REG_CCR] = sc.sc_psr;
#else
	ucp->uc_mcontext.gregs[REG_PSR] = sc.sc_psr;
#endif
	ucp->uc_mcontext.gregs[REG_G1] = sc.sc_g1;
	ucp->uc_mcontext.gregs[REG_O0] = sc.sc_o0;
#ifdef NEVER
	if (gwinswitch == 1) {
		int i, j;

		ucp->uc_mcontext.gwins->wbcnt = sc.sc_wbcnt;
		/* XXX - should use bcopy to move this in bulk */
		for (i = 0; i < sc.sc_wbcnt; i++) {
			ucp->uc_mcontext.gwins->spbuf[i] = sc.sc_spbuf[i];
			for (j = 0; j < 8; j++)
				ucp->uc_mcontext.gwins->wbuf[i].rw_local[j] =
				    sc.sc_wbuf[i][j];
			for (j = 0; j < 8; j++)
				ucp->uc_mcontext.gwins->wbuf[i].rw_in[j] =
				    sc.sc_wbuf[i][j+8];
		}
	}
#endif

	if (sig == SIGFPE) {
		/* Pop the consumed entry off the FPU exception queue. */
		if (ucp->uc_mcontext.fpregs.fpu_qcnt > 0) {
			ucp->uc_mcontext.fpregs.fpu_qcnt--;
			ucp->uc_mcontext.fpregs.fpu_q++;
		}
	}
#endif

	(void) setcontext(ucp);
}
/*
 * Common code for calling the user-specified signal handler.
 *
 * Runs in signal context with all blockable signals masked.  Copies a
 * self-consistent sigaction for `sig`, installs the handler's signal
 * mask, dispatches to the user handler, and finally resumes the
 * interrupted context with setcontext() — this function never returns.
 * SIGCANCEL and SIGAIOCANCEL get special-cased cancellation handling
 * before ordinary dispatch.
 */
void
call_user_handler(int sig, siginfo_t *sip, ucontext_t *ucp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	struct sigaction uact;
	volatile struct sigaction *sap;

	/*
	 * If we are taking a signal while parked or about to be parked
	 * on __lwp_park() then remove ourself from the sleep queue so
	 * that we can grab locks.  The code in mutex_lock_queue() and
	 * cond_wait_common() will detect this and deal with it when
	 * __lwp_park() returns.
	 */
	unsleep_self();
	set_parking_flag(self, 0);

	/* Report the caught signal to a debugger, if one is watching. */
	if (__td_event_report(self, TD_CATCHSIG, udp)) {
		self->ul_td_evbuf.eventnum = TD_CATCHSIG;
		self->ul_td_evbuf.eventdata = (void *)(intptr_t)sig;
		tdb_event(TD_CATCHSIG, udp);
	}

	/*
	 * Get a self-consistent set of flags, handler, and mask
	 * while holding the sig's sig_lock for the least possible time.
	 * We must acquire the sig's sig_lock because some thread running
	 * in sigaction() might be establishing a new signal handler.
	 * The code in sigaction() acquires the writer lock; here
	 * we acquire the readers lock to enhance concurrency in the
	 * face of heavy signal traffic, such as generated by java.
	 *
	 * Locking exceptions:
	 * No locking for a child of vfork().
	 * If the signal is SIGPROF with an si_code of PROF_SIG,
	 * then we assume that this signal was generated by
	 * setitimer(ITIMER_REALPROF) set up by the dbx collector.
	 * If the signal is SIGEMT with an si_code of EMT_CPCOVF,
	 * then we assume that the signal was generated by
	 * a hardware performance counter overflow.
	 * In these cases, assume that we need no locking.  It is the
	 * monitoring program's responsibility to ensure correctness.
	 */
	sap = &udp->siguaction[sig].sig_uaction;
	if (self->ul_vfork ||
	    (sip != NULL &&
	    ((sig == SIGPROF && sip->si_code == PROF_SIG) ||
	    (sig == SIGEMT && sip->si_code == EMT_CPCOVF)))) {
		/* we wish this assignment could be atomic */
		(void) memcpy(&uact, (void *)sap, sizeof (uact));
	} else {
		rwlock_t *rwlp = &udp->siguaction[sig].sig_lock;
		lrw_rdlock(rwlp);
		(void) memcpy(&uact, (void *)sap, sizeof (uact));
		/* SA_RESETHAND: one-shot handler, reset to default now. */
		if ((sig == SIGCANCEL || sig == SIGAIOCANCEL) &&
		    (sap->sa_flags & SA_RESETHAND))
			sap->sa_sigaction = SIG_DFL;
		lrw_unlock(rwlp);
	}

	/*
	 * Set the proper signal mask and call the user's signal handler.
	 * (We overrode the user-requested signal mask with maskset
	 * so we currently have all blockable signals blocked.)
	 *
	 * We would like to ASSERT() that the signal is not a member of the
	 * signal mask at the previous level (ucp->uc_sigmask) or the specified
	 * signal mask for sigsuspend() or pollsys() (self->ul_tmpmask) but
	 * /proc can override this via PCSSIG, so we don't bother.
	 *
	 * We would also like to ASSERT() that the signal mask at the previous
	 * level equals self->ul_sigmask (maskset for sigsuspend() / pollsys()),
	 * but /proc can change the thread's signal mask via PCSHOLD, so we
	 * don't bother with that either.
	 */
	ASSERT(ucp->uc_flags & UC_SIGMASK);
	if (self->ul_sigsuspend) {
		ucp->uc_sigmask = self->ul_sigmask;
		self->ul_sigsuspend = 0;
		/* the sigsuspend() or pollsys() signal mask */
		sigorset(&uact.sa_mask, &self->ul_tmpmask);
	} else {
		/* the signal mask at the previous level */
		sigorset(&uact.sa_mask, &ucp->uc_sigmask);
	}
	if (!(uact.sa_flags & SA_NODEFER))	/* add current signal */
		(void) sigaddset(&uact.sa_mask, sig);
	self->ul_sigmask = uact.sa_mask;
	self->ul_siglink = ucp;
	(void) __lwp_sigmask(SIG_SETMASK, &uact.sa_mask);

	/*
	 * If this thread has been sent SIGCANCEL from the kernel
	 * or from pthread_cancel(), it is being asked to exit.
	 * The kernel may send SIGCANCEL without a siginfo struct.
	 * If the SIGCANCEL is process-directed (from kill() or
	 * sigqueue()), treat it as an ordinary signal.
	 */
	if (sig == SIGCANCEL) {
		if (sip == NULL || SI_FROMKERNEL(sip) ||
		    sip->si_code == SI_LWP) {
			do_sigcancel();
			goto out;
		}
		/* SIGCANCEL is ignored by default */
		if (uact.sa_sigaction == SIG_DFL ||
		    uact.sa_sigaction == SIG_IGN)
			goto out;
	}

	/*
	 * If this thread has been sent SIGAIOCANCEL (SIGLWP) and
	 * we are an aio worker thread, cancel the aio request.
	 */
	if (sig == SIGAIOCANCEL) {
		aio_worker_t *aiowp = pthread_getspecific(_aio_key);

		if (sip != NULL && sip->si_code == SI_LWP && aiowp != NULL)
			siglongjmp(aiowp->work_jmp_buf, 1);
		/* SIGLWP is ignored by default */
		if (uact.sa_sigaction == SIG_DFL ||
		    uact.sa_sigaction == SIG_IGN)
			goto out;
	}

	/* Without SA_SIGINFO the handler must not receive a siginfo. */
	if (!(uact.sa_flags & SA_SIGINFO))
		sip = NULL;
	__sighndlr(sig, sip, ucp, uact.sa_sigaction);

#if defined(sparc) || defined(__sparc)
	/*
	 * If this is a floating point exception and the queue
	 * is non-empty, pop the top entry from the queue.  This
	 * is to maintain expected behavior.
	 */
	if (sig == SIGFPE && ucp->uc_mcontext.fpregs.fpu_qcnt) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		if (--fp->fpu_qcnt > 0) {
			unsigned char i;
			struct fq *fqp;

			fqp = fp->fpu_q;
			for (i = 0; i < fp->fpu_qcnt; i++)
				fqp[i] = fqp[i+1];
		}
	}
#endif	/* sparc */

out:
	/* Resume the interrupted context; setcontext() must not return. */
	(void) setcontext(ucp);
	thr_panic("call_user_handler(): setcontext() returned");
}
static ATTRIBUTE_NO_SANITIZE_THREAD void backtrace_sigaction(int signum, siginfo_t *info, void* ptr) { void *array[42]; size_t size; void * caller_address; ucontext_t *uc = (ucontext_t *) ptr; int gdb_pid = -1; /* get all entries on the stack */ size = backtrace(array, 42); /* Get the address at the time the signal was raised */ #if defined(REG_RIP) caller_address = (void *) uc->uc_mcontext.gregs[REG_RIP]; #elif defined(REG_EIP) caller_address = (void *) uc->uc_mcontext.gregs[REG_EIP]; #elif defined(__arm__) caller_address = (void *) uc->uc_mcontext.arm_pc; #elif defined(__mips__) caller_address = (void *) uc->uc_mcontext.sc_pc; #elif defined(REG_PC) && defined(__e2k__) caller_address = (void *) ((mcontext_t*) &uc->uc_mcontext)->mc_gregs[REG_PC]; #else /* TODO support more arch(s) */ # warning Unsupported architecture. caller_address = info->si_addr; #endif int should_die = 0; switch(info->si_signo) { case SIGXCPU: case SIGXFSZ: should_die = 1; break; case SIGABRT: case SIGALRM: if (info->si_pid == getpid()) should_die = 1; case SIGBUS: case SIGFPE: case SIGILL: case SIGSEGV: #ifndef SI_FROMKERNEL # define SI_FROMKERNEL(info) (info->si_code > 0) #endif if (SI_FROMKERNEL(info)) should_die = 1; } char time_buf[64]; time_t t = ldap_time_unsteady(); strftime(time_buf, sizeof(time_buf), "%F-%H%M%S", localtime(&t)); char name_buf[PATH_MAX]; int fd = -1; #ifdef snprintf # undef snprintf #endif if (snprintf(name_buf, sizeof(name_buf), "%s/slapd-backtrace.%s-%i.log%c", backtrace_homedir ? 
backtrace_homedir : ".", time_buf, getpid(), 0) > 0) fd = open(name_buf, O_CREAT | O_EXCL | O_WRONLY | O_APPEND, 0644); if (fd < 0) { if (backtrace_homedir) fd = open(strrchr(name_buf, '/') + 1, O_CREAT | O_EXCL | O_WRONLY | O_APPEND, 0644); if (fd < 0) fd = STDERR_FILENO; dprintf(fd, "\n\n*** Unable create \"%s\": %s!", name_buf, STRERROR(errno)); } dprintf(fd, "\n\n*** Signal %d (%s), address is %p from %p\n", signum, strsignal(signum), info->si_addr, (void *)caller_address); int n = readlink("/proc/self/exe", name_buf, sizeof(name_buf) - 1); if (n > 0) { name_buf[n] = 0; dprintf(fd, " Executable file %s\n", name_buf); } else { dprintf(fd, " Unable read executable name: %s\n", STRERROR(errno)); strcpy(name_buf, "unknown"); } void** actual = array; int frame = 0; for (n = 0; n < size; ++n) if (array[n] == caller_address) { frame = n; actual = array + frame; size -= frame; break; } dprintf(fd, "\n*** Backtrace by glibc:\n"); backtrace_symbols_fd(actual, size, fd); int mem_map_fd = open("/proc/self/smaps", O_RDONLY, 0); if (mem_map_fd >= 0) { char buf[1024]; dprintf(fd, "\n*** Memory usage map (by /proc/self/smaps):\n"); while(1) { int n = read(mem_map_fd, &buf, sizeof(buf)); if (n < 1) break; if (write(fd, &buf, n) != n) break; } close(mem_map_fd); } /* avoid ECHILD from waitpid() */ signal(SIGCHLD, SIG_DFL); dprintf(fd, "\n*** Backtrace by addr2line:\n"); for(n = 0; n < size; ++n) { int status = EXIT_FAILURE, child_pid = fork(); if (! 
child_pid) { char addr_buf[64]; close(STDIN_FILENO); dup2(fd, STDOUT_FILENO); close(fd); dprintf(STDOUT_FILENO, "(%d) %p: ", n, actual[n]); sprintf(addr_buf, "%p", actual[n]); execlp("addr2line", "addr2line", addr_buf, "-C", "-f", "-i", #if __GLIBC_PREREQ(2,14) "-p", /* LY: not available on RHEL6, guest by glibc version */ #endif "-e", name_buf, NULL); exit(EXIT_FAILURE); } else if (child_pid < 0) { dprintf(fd, "\n*** Unable complete backtrace by addr2line, sorry (%s, %d).\n", "fork", errno); break; } else if (waitpid(child_pid, &status, 0) < 0 || status != W_EXITCODE(EXIT_SUCCESS, 0)) { dprintf(fd, "\n*** Unable complete backtrace by addr2line, sorry (%s, pid %d, errno %d, status 0x%x).\n", "waitpid", child_pid, errno, status); break; } } if (is_debugger_present()) { dprintf(fd, "*** debugger already present\n"); goto ballout; } int retry_by_return = 0; if (should_die && SI_FROMKERNEL(info)) { /* LY: Expect kernel kill us again, * therefore for switch to 'guilty' thread and we may just return, * instead of sending SIGTRAP and later switch stack frame by GDB. 
*/ retry_by_return = 1; } if (is_valgrind_present()) { dprintf(fd, "*** valgrind present, skip backtrace by gdb\n"); goto ballout; } int pipe_fd[2]; if (pipe(pipe_fd)) { pipe_fd[0] = pipe_fd[1] = -1; goto ballout; } gdb_is_ready_for_backtrace = 0; pid_t tid = syscall(SYS_gettid); gdb_pid = fork(); if (!gdb_pid) { char pid_buf[16]; sprintf(pid_buf, "%d", getppid()); dup2(pipe_fd[0], STDIN_FILENO); close(pipe_fd[1]); pipe_fd[0] = pipe_fd[1] =-1; dup2(fd, STDOUT_FILENO); dup2(fd, STDERR_FILENO); for(fd = getdtablesize(); fd > STDERR_FILENO; --fd) close(fd); setsid(); setpgid(0, 0); dprintf(STDOUT_FILENO, "\n*** Backtrace by GDB " #if GDB_SWITCH2GUILTY_THREAD "(pid %s, LWP %i, frame #%d):\n", pid_buf, tid, frame); #else "(pid %s, LWP %i, please find frame manually):\n", pid_buf, tid); #endif execlp("gdb", "gdb", "-q", "-se", name_buf, "-n", NULL); kill(getppid(), SIGKILL); dprintf(STDOUT_FILENO, "\n*** Sorry, GDB launch failed: %s\n", STRERROR(errno)); fsync(STDOUT_FILENO); exit(EXIT_FAILURE); }