static bool ztex_prepare(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex = cgpu->device_ztex; { char *fpganame = malloc(LIBZTEX_SNSTRING_LEN+3+1); sprintf(fpganame, "%s-%u", ztex->snString, cgpu->proc_id+1); cgpu->name = fpganame; } ztex_selectFpga(ztex, cgpu->proc_id); if (libztex_configureFpga(ztex, cgpu->proc_repr) != 0) { libztex_resetFpga(ztex); ztex_releaseFpga(ztex); applog(LOG_ERR, "%"PRIpreprv": Disabling!", cgpu->proc_repr); thr->cgpu->deven = DEV_DISABLED; return true; } ztex->dclk.freqM = ztex->dclk.freqMaxM+1; //ztex_updateFreq(thr); libztex_setFreq(ztex, ztex->dclk.freqMDefault, cgpu->proc_repr); ztex_releaseFpga(ztex); notifier_init(thr->work_restart_notifier); applog(LOG_DEBUG, "%"PRIpreprv": prepare", cgpu->proc_repr); cgpu->status = LIFE_INIT2; return true; }
/* Initialise the curses-based user interface: terminal modes, optional
 * mouse support, colours, and the standard windows (title bar, status
 * bar, input window), finishing with the console "about" screen.
 * NOTE(review): call order matters — initscr() must precede all other
 * curses calls, and windows are created after the initial refresh(). */
void ui_init(void)
{
	log_info("Initialising UI");
	initscr();            /* enter curses mode */
	raw();                /* raw input: no line buffering or signal keys */
	keypad(stdscr, TRUE); /* decode function/arrow keys */
	if (prefs_get_boolean(PREF_MOUSE)) {
		mousemask(ALL_MOUSE_EVENTS, NULL);
		mouseinterval(5); /* max press-to-release click interval, ms */
	}
	ui_load_colours();
	refresh();
	create_title_bar();
	create_status_bar();
	status_bar_active(1);
	create_input_window();
	wins_init();
	cons_about();
	notifier_init();
#ifdef HAVE_LIBXSS
	/* X display handle — presumably needed for XScreenSaver idle
	 * queries; TODO confirm against the idle-detection code */
	display = XOpenDisplay(0);
#endif
	ui_idle_time = g_timer_new(); /* GLib timer; name suggests idle tracking */
	wins_refresh_current();
}
static int nhrp_if_new_hook(struct interface *ifp) { struct nhrp_interface *nifp; afi_t afi; nifp = XCALLOC(MTYPE_NHRP_IF, sizeof(struct nhrp_interface)); if (!nifp) return 0; ifp->info = nifp; nifp->ifp = ifp; notifier_init(&nifp->notifier_list); for (afi = 0; afi < AFI_MAX; afi++) { struct nhrp_afi_data *ad = &nifp->afi[afi]; ad->holdtime = NHRPD_DEFAULT_HOLDTIME; list_init(&ad->nhslist_head); } return 0; }
/* Per-core secure-monitor initialisation entry point.
 * Every core runs this; primary-core-only work is guarded by
 * TARGET_PRIMARY_CORE_ID checks, and phase barriers keep all cores in
 * lock-step through each initialisation stage. */
void monitor_init(uint32_t core_id)
{
#if TARGET_NUM_CORES > 1
	/* Target-dependent master initialization */
	if (core_id == TARGET_PRIMARY_CORE_ID) {
		target_primary_core_init();
	}
#endif /* TARGET_NUM_CORES > 1 */
	/* Barrier: wait until the .bss and .data sections are initialized */
	initialization_barrier(core_id);
	/* Set up the Monitor Vector Base Address Register (MVBAR) */
	CP15_MVBAR_WRITE(&monitor_vector_table);
	/* Set up the Secure Configuration Register (SCR).
	 * NOTE(review): original comment said "for the T OS" — presumably
	 * the trusted OS; confirm against MON_T_SCR's definition. */
	CP15_SCR_WRITE(MON_T_SCR);
	/* Set up the Non-Secure Access Control Register (NSACR) */
	CP15_NSACR_WRITE(MON_NSACR);
	/* Target-dependent initialization */
	target_init(core_id);
	/* Barrier: target-dependent initialization complete on all cores */
	phase_barrier(core_id, 1);
	/* Initialize libraries in the primary core only */
	if (core_id == TARGET_PRIMARY_CORE_ID) {
		syscalls_init();
		notifier_init();
	}
	/* Barrier: monitor initialization is finished */
	phase_barrier(core_id, 2);
}
/* Main mining loop for drivers using the queue-based API: keeps each
 * processor's work queue topped up, services poll deadlines, and sleeps
 * on the notifier between passes.  Runs until cgpu->shutdown is set. */
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;

	/* Lazily create the work-restart notifier ([1] == -1 marks it as
	 * not yet initialised) */
	if (thr->work_restart_notifier[1] == -1)
		notifier_init(thr->work_restart_notifier);

	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1; /* -1 sentinel: no deadline yet */
		timer_set_now(&tv_now);
		/* One pass over every processor of this device */
		for (proc = cgpu; proc; proc = proc->next_proc) {
			mythr = proc->thr[0];
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running) {
				/* Just transitioned disabled -> enabled */
				if (unlikely(!mythr->_last_sbr_state)) {
					mt_disable_finish(mythr);
					mythr->_last_sbr_state = should_be_running;
				}
				/* Restart requested: drop stale queued work */
				if (unlikely(mythr->work_restart)) {
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				/* Top up the device queue until it reports full */
				while (!mythr->queue_full) {
					if (mythr->next_work) {
						/* Retry work a previous queue_append rejected */
						work = mythr->next_work;
						mythr->next_work = NULL;
					} else {
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						/* Queue full after all; hold work for next pass */
						mythr->next_work = work;
				}
			} else if (unlikely(mythr->_last_sbr_state)) {
				/* Just transitioned enabled -> disabled */
				mythr->_last_sbr_state = should_be_running;
				do_queue_flush(mythr);
			}

			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);

			/* poll() may have freed queue space or changed device
			 * state; re-check rather than sleeping on a non-full
			 * queue */
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;

			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		/* Sleep until the earliest poll deadline or a notification */
		do_notifier_select(thr, &tv_timeout);
	}
}
/* Main mining loop for drivers using the asynchronous job API: drives
 * each processor's job state machine (prepare/start/get-results),
 * services poll deadlines, and sleeps on the notifier between passes.
 * Runs until cgpu->shutdown is set. */
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr; /* keep the master thread for select */
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;

	/* Lazily create the work-restart notifier ([1] == -1 marks it as
	 * not yet initialised) */
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);

	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1; /* -1 sentinel: no deadline yet */
		timer_set_now(&tv_now);
		/* One pass over every processor of this device */
		for (proc = cgpu; proc; proc = proc->next_proc) {
			mythr = proc->thr[0];

			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;

			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);

			if (should_be_running) {
				/* Enabled but idle: finish the disable transition,
				 * then go prepare a job */
				if (unlikely(!(is_running || mythr->_job_transition_in_progress))) {
					mt_disable_finish(mythr);
					goto djp;
				}
				/* Restart requested: start a fresh job now */
				if (unlikely(mythr->work_restart))
					goto djp;
			} else // ! should_be_running
			{
				/* Disabled while a job is running: collect results
				 * and stop issuing new work */
				if (unlikely(is_running && !mythr->_job_transition_in_progress)) {
disabled: ;
					mythr->tv_morework.tv_sec = -1;
					if (mythr->busy_state != TBS_GETTING_RESULTS)
						do_get_results(mythr, false);
					else
						// Avoid starting job when pending result fetch completes
						mythr->_proceed_with_new_job = false;
				}
			}

			/* Time for more work on this processor */
			if (timer_passed(&mythr->tv_morework, &tv_now)) {
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}

defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);

			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		/* Sleep until the earliest deadline or a notification */
		do_notifier_select(thr, &tv_timeout);
	}
}