/*
 * Read the current time from the clock chip and convert to UNIX form.
 * Assumes that the year in the clock chip is valid.
 * Must be called with tod_lock held.
 *
 * Returns the converted chip time on success; on persistent read
 * failure the tod fault state is reset and the current hrestime is
 * returned instead.
 */
static timestruc_t
todm5819_get(void)
{
	int i;
	timestruc_t ts;
	struct rtc_t rtc;

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * Read from the tod, and if it isn't accessible (update in
	 * progress) wait before retrying, up to
	 * TODM5819_UIP_RETRY_THRESH attempts.
	 */
	for (i = 0; i < TODM5819_UIP_RETRY_THRESH; i++) {
		if (read_rtc(&rtc))
			break;
		drv_usecwait(TODM5819_UIP_WAIT_USEC);
	}
	if (i == TODM5819_UIP_RETRY_THRESH) {
		/*
		 * We couldn't read from the tod; clear fault state and
		 * fall back to the current system time.
		 */
		tod_fault_reset();
		return (hrestime);
	}

	DPRINTF("todm5819_get: century=%d year=%d dom=%d hrs=%d\n",
	    rtc.rtc_century, rtc.rtc_year, rtc.rtc_dom, rtc.rtc_hrs);

	ts.tv_sec = tod_to_utc(rtc_to_tod(&rtc));
	ts.tv_nsec = 0;
	return (ts);
}
/*
 * Clear any pending time-of-day fault state on behalf of CPR
 * (suspend/resume).  tod_fault_reset() is called with tod_lock held,
 * matching the locking convention of the other tod routines in this
 * file.
 */
void
cpr_tod_fault_reset(void)
{
	mutex_enter(&tod_lock);
	tod_fault_reset();
	mutex_exit(&tod_lock);
}
/*
 * Set the hardware time-of-day clock to ts via the tod_ops vector.
 * NOTE(review): other tod_fault_reset() callers here hold tod_lock;
 * presumably the caller of tod_set() does too — confirm against callers.
 */
void
tod_set(timestruc_t ts)
{
	/*
	 * Prevent false alarm in tod_validate() due to tod value change.
	 */
	tod_fault_reset();

	tod_ops.tod_set(ts);
}
static timestruc_t todds1307_get(void) { timestruc_t ts; todinfo_t tod; struct rtc_t rtc; ASSERT(MUTEX_HELD(&tod_lock)); if (sync_clock_once) { todds1307_read_rtc(&soft_rtc); sync_clock_once = 0; } else { tod_fault_reset(); return (hrestime); } bcopy(&soft_rtc, &rtc, sizeof (rtc)); /* * 00 - 68 = 2000 thru 2068 * 69-99 = 1969 thru 1999 */ tod.tod_year = rtc.rtc_year; if (rtc.rtc_year <= 68) tod.tod_year += 100; tod.tod_month = rtc.rtc_mon; tod.tod_day = rtc.rtc_dom; tod.tod_dow = rtc.rtc_dow; tod.tod_hour = rtc.rtc_hrs; tod.tod_min = rtc.rtc_min; tod.tod_sec = rtc.rtc_sec; ts.tv_sec = tod_to_utc(tod); ts.tv_nsec = 0; return (ts); }
/*
 * Resume the system after a DR suspend.  Walks the switch from the
 * state the suspend actually reached (srh->sr_suspend_state) and falls
 * through the remaining resume steps: restart cpus, resume drivers,
 * resume the lock manager, restart user threads, then signal SIGTHAW.
 */
void
dr_resume(dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;

	handle = srh->sr_dr_handlep;

	if (srh->sr_suspend_state < DR_SRSTATE_FULL) {
		/*
		 * Update the signature block.
		 * If cpus are not paused, this can be done now.
		 * See comments below.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
		    CPU->cpu_id);
	}

	switch (srh->sr_suspend_state) {
	case DR_SRSTATE_FULL:

		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_fault_reset();
		mutex_exit(&tod_lock);

		dr_enable_intr(); 	/* enable intr & clock */

		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * Update the signature block.
		 * This must not be done while cpus are paused, since on
		 * Starcat the cpu signature update acquires an adaptive
		 * mutex in the iosram driver. Blocking with cpus paused
		 * can lead to deadlock.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
		    CPU->cpu_id);

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (srh->sr_flags & (SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
				watchdog_timeout_seconds);
			mutex_exit(&tod_lock);
		}

		/*
		 * This should only be called if drmach_suspend_last()
		 * was called and state transitioned to DR_SRSTATE_FULL
		 * to prevent resume attempts on device instances that
		 * were not previously suspended.
		 */
		drmach_resume_first();

		/* FALLTHROUGH */

	case DR_SRSTATE_DRIVER:
		/*
		 * resume drivers
		 */
		srh->sr_err_idx = 0;

		/* no parent dip to hold busy */
		dr_resume_devices(ddi_root_node(), srh);

		/* record any per-device resume failures on the handle */
		if (srh->sr_err_idx && srh->sr_dr_handlep) {
			(srh->sr_dr_handlep)->h_err = drerr_int(ESBD_RESUME,
				srh->sr_err_ints, srh->sr_err_idx, 1);
		}

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case DR_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!dr_skip_user_threads) {
			prom_printf("DR: resuming user threads...\n");
			dr_start_user_threads();
		}
		/* FALLTHROUGH */

	case DR_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		PR_QR("sending SIGTHAW...\n");
		dr_signal_user(SIGTHAW);
		break;
	}

	i_ndi_allow_device_tree_changes(handle->h_ndi);

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	prom_printf("DR: resume COMPLETED\n");
}
/*
 * Resume the system after an sbdp suspend.  Mirrors dr_resume():
 * dispatch on the suspend state actually reached and fall through the
 * remaining resume steps — restart cpus, resume devices, resume the
 * lock manager, restart user threads, then signal SIGTHAW.
 */
void
sbdp_resume(sbdp_sr_handle_t *srh)
{
	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	switch (SR_STATE(srh)) {
	case SBDP_SRSTATE_FULL:

		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_fault_reset();
		mutex_exit(&tod_lock);

		sbdp_enable_intr(); 	/* enable intr & clock */

		/*
		 * release all the other cpus
		 * using start_cpus() vice sbdp_release_cpus()
		 */
		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (SR_CHECK_FLAG(srh, SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
				saved_watchdog_seconds);
			mutex_exit(&tod_lock);
		}

		/* FALLTHROUGH */

	case SBDP_SRSTATE_DRIVER:
		/*
		 * resume devices: root node doesn't have to
		 * be held in any way.
		 */
		sbdp_resume_devices(ddi_root_node(), srh);

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case SBDP_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!sbdp_skip_user_threads) {
			SBDP_DBG_QR("DR: resuming user threads...\n");
			sbdp_start_user_threads();
		}
		/* FALLTHROUGH */

	case SBDP_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		SBDP_DBG_QR("sending SIGTHAW...\n");
		sbdp_signal_user(SIGTHAW);
		break;
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	SBDP_DBG_QR("DR: resume COMPLETED\n");
}