static void dr_start_user_threads(void) { kthread_id_t tp; mutex_enter(&pidlock); /* walk all threads and release them */ for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) { proc_t *p = ttoproc(tp); /* skip kernel threads */ if (ttoproc(tp)->p_as == &kas) continue; mutex_enter(&p->p_lock); tp->t_proc_flag &= ~TP_CHKPT; mutex_exit(&p->p_lock); thread_lock(tp); if (CPR_ISTOPPED(tp)) { /* back on the runq */ tp->t_schedflag |= TS_RESUME; setrun_locked(tp); } thread_unlock(tp); } mutex_exit(&pidlock); }
/* * Checks and makes sure all user threads are stopped */ static int cpr_check_user_threads() { kthread_id_t tp; int rc = 0; mutex_enter(&pidlock); tp = curthread->t_next; do { if (ttoproc(tp)->p_as == &kas || ttoproc(tp)->p_stat == SZOMB) continue; thread_lock(tp); /* * make sure that we are off all the queues and in a stopped * state. */ if (!CPR_ISTOPPED(tp)) { thread_unlock(tp); mutex_exit(&pidlock); if (count == CPR_UTSTOP_RETRY) { DEBUG1(errp("Suspend failed: cannt stop " "uthread\n")); cpr_err(CE_WARN, "Suspend cannot stop " "process %s (%p:%x).", ttoproc(tp)->p_user.u_psargs, (void *)tp, tp->t_state); cpr_err(CE_WARN, "Process may be waiting for" " network request, please try again."); } DEBUG2(errp("cant stop t=%p state=%x pfg=%x sched=%x\n", tp, tp->t_state, tp->t_proc_flag, tp->t_schedflag)); DEBUG2(errp("proc %p state=%x pid=%d\n", ttoproc(tp), ttoproc(tp)->p_stat, ttoproc(tp)->p_pidp->pid_id)); return (1); } thread_unlock(tp); } while ((tp = tp->t_next) != curthread && rc == 0); mutex_exit(&pidlock); return (0); }
/* * start all threads that were stopped for checkpoint. */ void cpr_start_user_threads() { kthread_id_t tp; proc_t *p; mutex_enter(&pidlock); tp = curthread->t_next; do { p = ttoproc(tp); /* * kernel threads are callback'ed rather than setrun. */ if (ttoproc(tp)->p_as == &kas) continue; /* * t_proc_flag should have been cleared. Just to make sure here */ mutex_enter(&p->p_lock); tp->t_proc_flag &= ~TP_CHKPT; mutex_exit(&p->p_lock); thread_lock(tp); if (CPR_ISTOPPED(tp)) { /* * put it back on the runq */ tp->t_schedflag |= TS_RESUME; setrun_locked(tp); } thread_unlock(tp); /* * DEBUG - Keep track of current and next thread pointer. */ } while ((tp = tp->t_next) != curthread); mutex_exit(&pidlock); }
/*
 * Stop all user threads prior to a DR suspend.
 *
 * Makes up to DR_UTSTOP_RETRY passes over the global thread list.  Each
 * pass marks every running user thread with TP_CHKPT (requesting it to
 * stop), posts an AST, and wakes sleeping-but-wakeable threads so they
 * notice the request; already-stopped threads get TS_RESUME cleared so
 * they stay stopped.  After a quadratically-growing wait, a second walk
 * checks whether every user thread stopped.
 *
 * Returns DDI_SUCCESS when all user threads are stopped (or when
 * dr_skip_user_threads is set); returns ESRCH — with handle->h_err set
 * and the offending pids recorded in srh->sr_err_ints — if threads
 * remain unstopped after all retries.
 *
 * Lock order throughout: pidlock -> p_lock -> thread_lock.
 */
/* ARGSUSED */
static int
dr_stop_user_threads(dr_sr_handle_t *srh)
{
	int		count;
	int		bailout;
	dr_handle_t	*handle = srh->sr_dr_handlep;
	static fn_t	f = "dr_stop_user_threads";
	kthread_id_t	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	DR_UTSTOP_RETRY	4
#define	DR_UTSTOP_WAIT	hz

	if (dr_skip_user_threads)
		return (DDI_SUCCESS);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	srh->sr_err_idx = 0;
	for (count = 0; count < DR_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				/*
				 * add_one_utstop() must be called without
				 * thread_lock/p_lock held, so drop and
				 * reacquire them around it.
				 */
				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				/* post an AST so the thread sees TP_CHKPT */
				aston(tp);

				if (tp->t_state == TS_SLEEP &&
				    (tp->t_flag & T_WAKEABLE)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * DR_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(dr_allow_blocked_threads &&
			    DR_VSTOPPED(tp))) {
				bailout = 1;
				/* only report on the final retry */
				if (count == DR_UTSTOP_RETRY - 1) {
					/*
					 * save the pid for later reporting
					 */
					srh->sr_err_idx =
					    dr_add_int(srh->sr_err_ints,
					    srh->sr_err_idx, DR_MAX_ERR_INT,
					    (uint64_t)p->p_pid);

					cmn_err(CE_WARN, "%s: "
					    "failed to stop thread: "
					    "process=%s, pid=%d",
					    f, p->p_user.u_psargs, p->p_pid);

					PR_QR("%s: failed to stop thread: "
					    "process=%s, pid=%d, t_id=0x%p, "
					    "t_state=0x%x, t_proc_flag=0x%x, "
					    "t_schedflag=0x%x\n",
					    f, p->p_user.u_psargs, p->p_pid,
					    tp, tp->t_state, tp->t_proc_flag,
					    tp->t_schedflag);
				}
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		handle->h_err = drerr_int(ESBD_UTHREAD, srh->sr_err_ints,
		    srh->sr_err_idx, 0);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
/*
 * Re-attach (DDI_RESUME) devices that were suspended, walking each
 * sibling list in reverse order and recursing into children.  If a
 * device previously failed to suspend (SR_FAILED_DIP), its hold is
 * released and nothing at or before it is resumed.  On a resume
 * failure, the device name is recorded in the handle's error buffer
 * and ESGT_RESUME is set.
 */
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t *dip, *next, *last = NULL;
	char *bn;
	sbd_error_t *sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		/* find the last sibling not yet processed */
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
		    SR_FAILED_DIP(srh) == NULL) {

			/*
			 * NOTE(review): if devi_binding_name is NULL, bn is
			 * left uninitialized yet may still be read below
			 * (sbdp_bypass_device / the %s@%s messages) —
			 * confirm whether a real device can lack a binding
			 * name here.
			 */
			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info, d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s\n",
						    d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */
					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
					    "%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		/* recurse into this node's children, then mark it done */
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if thread is virtually stopped. Similar to CPR_VSTOPPED
 * but from DR point of view. These user threads are waiting in
 * the kernel. Once they return from kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&			\
	((t)->t_proc_flag & TP_CHKPT))

/*
 * Stop all user threads prior to an sbdp suspend.
 *
 * Makes up to SBDP_UTSTOP_RETRY passes over the global thread list,
 * requesting each running user thread to stop (TP_CHKPT + AST, waking
 * wakeable/waiting sleepers) and keeping already-stopped threads
 * stopped by clearing TS_RESUME.  After a quadratically-growing wait,
 * a second walk verifies that every user thread is stopped (or, if
 * sbdp_allow_blocked_threads is set, virtually stopped per
 * SBDP_VSTOPPED).
 *
 * Returns DDI_SUCCESS on success (or when sbdp_skip_user_threads is
 * set); otherwise records the last offending process in the handle's
 * error buffer, sets ESGT_UTHREAD, and returns ESRCH.
 *
 * Lock order throughout: pidlock -> p_lock -> thread_lock.
 */
static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	/*
	 * cache_* hold details of the last unstopped thread for the
	 * final failure report; they are assigned only when bailout is
	 * set, so the report at the bottom never reads them unset.
	 */
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				/*
				 * add_one_utstop() must be called without
				 * thread_lock/p_lock held, so drop and
				 * reacquire them around it.
				 */
				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				/* post an AST so the thread sees TP_CHKPT */
				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);

			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);

		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);

		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {
				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		/*
		 * NOTE(review): cache_tp is a kthread_id_t passed to %p
		 * without a (void *) cast — technically a format mismatch;
		 * confirm against the file's cstyle/lint expectations.
		 */
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
		    cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}