Example #1
/* A generic wait function for polling threads: waits up to mstime
 * milliseconds for a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
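
The timer helpers that appear throughout these examples are defined elsewhere in the surrounding codebase. As orientation, here is a minimal sketch of what the call sites imply, assuming a POSIX build; the bodies are illustrative guesses, not the project's actual definitions. A tv_sec of -1 marks an unset deadline, matching the initialisations in the later minerloop examples.

#include <stddef.h>
#include <sys/time.h>

/* Capture the current time (sketch: plain gettimeofday). */
static void timer_set_now(struct timeval *tvp)
{
	gettimeofday(tvp, NULL);
}

/* Set *tvp_timer to *tvp_now plus usecs microseconds. */
static void timer_set_delay(struct timeval *tvp_timer, const struct timeval *tvp_now, unsigned long usecs)
{
	struct timeval tv_add = {
		.tv_sec = usecs / 1000000,
		.tv_usec = usecs % 1000000,
	};

	timeradd(tvp_now, &tv_add, tvp_timer);
}

/* Turn an absolute deadline into the relative timeout select() expects;
 * NULL means block indefinitely (deadline unset). */
static struct timeval *select_timeout(struct timeval *tvp_timeout, const struct timeval *tvp_now)
{
	if (tvp_timeout->tv_sec == -1)
		return NULL;
	if (timercmp(tvp_timeout, tvp_now, <))
		timerclear(tvp_timeout);
	else
		timersub(tvp_timeout, tvp_now, tvp_timeout);
	return tvp_timeout;
}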
Example #2
// Algo benchmark, crash-prone, system independent stage
double bench_algo_stage3(
	enum sha256_algos algo
)
{
	// Use a random work block pulled from a pool
	static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK };
	struct work work __attribute__((aligned(128)));
	unsigned char hash1[64];

	size_t bench_size = sizeof(work);
	size_t work_size = sizeof(bench_block);
	size_t min_size = (work_size < bench_size ? work_size : bench_size);
	memset(&work, 0, sizeof(work));
	memcpy(&work, &bench_block, min_size);

	static struct thr_info dummy;

	struct timeval end;
	struct timeval start;
	uint32_t max_nonce = opt_algo == ALGO_FASTAUTO ? (1<<8) : (1<<22);
	uint32_t last_nonce = 0;

	memcpy(&hash1[0], &hash1_init[0], sizeof(hash1));

	timer_set_now(&start);
	{
		sha256_func func = sha256_funcs[algo];
		(*func)(
			&dummy,
			work.midstate,
			work.data,
			hash1,
			work.hash,
			work.target,
			max_nonce,
			&last_nonce,
			work.blk.nonce
		);
	}
	timer_set_now(&end);

	uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
	uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
	uint64_t usec_elapsed = usec_end - usec_start;

	double rate = -1.0;
	if (0<usec_elapsed) {
		rate = (1.0*(last_nonce+1))/usec_elapsed;
	}
	return rate;
}
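
Note the units: last_nonce + 1 hashes complete in usec_elapsed microseconds, so the returned rate is hashes per microsecond, numerically equal to megahashes per second. A hypothetical caller might look like this (ALGO_C and applog are assumed from the surrounding codebase, not from this snippet):

void report_benchmark(void)
{
	double mhs = bench_algo_stage3(ALGO_C);

	if (mhs < 0.0)
		applog(LOG_ERR, "Benchmark failed (no elapsed time measured)");
	else
		applog(LOG_NOTICE, "ALGO_C: %.1f Mh/s", mhs);
}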
Example #3
void display_enable(bool enable)
{
	uint8_t i;
	struct sseg_digit_data *ddata;

	if (enable == display.enabled)
		return;

	display.enabled = enable;
	if (enable) {
		for (i = 0; i < DISPLAY_NR_DIGITS; i++) {
			ddata = &display.digit_data[i].sseg;

			ddata->iomap = &digit_iomaps[i];
			sseg_init(ddata);
		}
		timer_set_now(&display.mux_timer);
	} else {
		for (i = 0; i < DISPLAY_NR_DIGITS; i++) {
			ddata = &display.digit_data[i].sseg;

			sseg_exit(ddata);
		}
	}
}
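
Resetting display.mux_timer on enable keeps the first multiplexing interval from being measured against a stale timestamp. A hypothetical periodic handler consuming that timer might look like this; display.mux_digit and DISPLAY_MUX_PERIOD_MS are assumed names, and timer_ms_since() is used as in Example #9 below:

/* Advance to the next digit once the mux period has elapsed (sketch). */
void display_periodic_work(void)
{
	if (!display.enabled)
		return;
	if (timer_ms_since(&display.mux_timer) < DISPLAY_MUX_PERIOD_MS)
		return;
	timer_set_now(&display.mux_timer);
	display.mux_digit = (display.mux_digit + 1) % DISPLAY_NR_DIGITS;
}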
Example #4
void job_start_complete(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	do_process_results(mythr, &tv_now, mythr->prev_work, false);
}
Example #5
static void do_set_enabled(bool enabled)
{
	contrtemp.enabled = enabled;

	/* Reset the temp controller. */
	pid_reset(&contrtemp.pid);
	timer_set_now(&contrtemp.dt_timer);
	contrtemp_set_boost_mode(TEMPBOOST_NORMAL);
	/* Reset current controller to no-current. */
	contrcurr_set_setpoint(float_to_fixpt(CONTRCURR_NEGLIM));
}
Example #6
void job_results_fetched(struct thr_info *mythr)
{
	if (mythr->_proceed_with_new_job)
		do_job_start(mythr);
	else
	{
		struct timeval tv_now;
		
		timer_set_now(&tv_now);
		
		do_process_results(mythr, &tv_now, mythr->prev_work, true);
	}
}
Example #7
// Algo benchmark, crash-prone, system independent stage
double bench_algo_stage3(
	enum sha256_algos algo
)
{
	struct work work __attribute__((aligned(128)));

	get_benchmark_work(&work, false);

	static struct thr_info dummy;

	struct timeval end;
	struct timeval start;
	uint32_t max_nonce = opt_algo == ALGO_FASTAUTO ? (1<<8) : (1<<22);
	uint32_t last_nonce = 0;

	timer_set_now(&start);
	{
		sha256_func func = sha256_funcs[algo];
		(*func)(
			&dummy,
			&work,
			max_nonce,
			&last_nonce,
			0
		);
	}
	timer_set_now(&end);

	uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec;
	uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec;
	uint64_t usec_elapsed = usec_end - usec_start;

	double rate = -1.0;
	if (0<usec_elapsed) {
		rate = (1.0*(last_nonce+1))/usec_elapsed;
	}
	return rate;
}
Example #8
void mt_job_transition(struct thr_info *mythr)
{
	struct timeval tv_now;
	
	timer_set_now(&tv_now);
	
	if (mythr->starting_next_work)
	{
		// Rotate the work chain: next_work becomes current, current becomes prev
		mythr->next_work->tv_work_start = tv_now;
		if (mythr->prev_work)
			free_work(mythr->prev_work);
		mythr->prev_work = mythr->work;
		mythr->work = mythr->next_work;
		mythr->next_work = NULL;
	}
	mythr->tv_jobstart = tv_now;
	mythr->_job_transition_in_progress = false;
}
Example #9
static void contrtemp_run(fixpt_t r)
{
	fixpt_t dt, y, y_current;
	uint8_t emergency_flags;

	if (!contrtemp.enabled)
		return;
	if (contrtemp.emergency)
		return;

	/* Get delta-t that elapsed since last run, in seconds */
	dt = fixpt_div(int_to_fixpt(timer_ms_since(&contrtemp.dt_timer)),
		       int_to_fixpt(1000));
	timer_set_now(&contrtemp.dt_timer);

	/* Run the PID controller */
	y = pid_run(&contrtemp.pid, dt, r);

	debug_report_fixpt(DEBUG_PFX1("ty1"), &contrtemp.old_temp_control1, y);

	/* Map the requested temperature to a heater current. */
	y_current = temp_to_amps(y);

	emergency_flags = contrcurr_get_emerg();
	if (r > float_to_fixpt(CONTRTEMP_POSLIM)) {
		/* The measured temperature is higher than the maximum.
		 * We need to avoid damage.
		 * Disable current by requesting an emergency in
		 * the current controller.
		 */
		emergency_flags |= CONTRCURR_EMERG_HIGH_TEMP;
		y_current = float_to_fixpt(CONTRCURR_NEGLIM);
	} else {
		emergency_flags &= (uint8_t)~CONTRCURR_EMERG_HIGH_TEMP;
	}
	contrcurr_set_emerg(emergency_flags);

	debug_report_fixpt(DEBUG_PFX1("ty2"), &contrtemp.old_temp_control2,
			   y_current);

	/* Set the current controller setpoint to the requested current. */
	contrcurr_set_setpoint(y_current);
}
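
timer_ms_since() is not shown here; in this embedded controller it is presumably built on a tick counter, but in terms of the POSIX timeval helpers used by the other examples a plausible definition would be:

/* Milliseconds elapsed between *tvp and now (illustrative sketch). */
static int timer_ms_since(const struct timeval *tvp)
{
	struct timeval tv_now, tv_diff;

	timer_set_now(&tv_now);
	timersub(&tv_now, tvp, &tv_diff);
	return tv_diff.tv_sec * 1000 + tv_diff.tv_usec / 1000;
}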
Example #10
static
void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct timeval tv_now;
	int maxfd;
	fd_set rfds;
	
	timer_set_now(&tv_now);
	FD_ZERO(&rfds);
	FD_SET(thr->notifier[0], &rfds);
	maxfd = thr->notifier[0];
	FD_SET(thr->work_restart_notifier[0], &rfds);
	set_maxfd(&maxfd, thr->work_restart_notifier[0]);
	if (thr->mutex_request[1] != INVSOCK)
	{
		FD_SET(thr->mutex_request[0], &rfds);
		set_maxfd(&maxfd, thr->mutex_request[0]);
	}
	if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0)
		return;
	if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds))
	{
		// FIXME: This can only handle one request at a time!
		pthread_mutex_t *mutexp = &cgpu->device_mutex;
		notifier_read(thr->mutex_request);
		mutex_lock(mutexp);
		pthread_cond_signal(&cgpu->device_cond);
		pthread_cond_wait(&cgpu->device_cond, mutexp);
		mutex_unlock(mutexp);
	}
	if (FD_ISSET(thr->notifier[0], &rfds)) {
		notifier_read(thr->notifier);
	}
	if (FD_ISSET(thr->work_restart_notifier[0], &rfds))
		notifier_read(thr->work_restart_notifier);
}
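
The notifier arrays behave like a self-pipe: index 0 is the readable end that can sit in a select() set, index 1 the writable end (INVSOCK when unset). A rough sketch of the primitive, assuming a POSIX build where SOCKETTYPE is a plain int; the project's real notifier functions live elsewhere:

#include <sys/socket.h>
#include <unistd.h>

#define INVSOCK (-1)

/* Create the socketpair backing a notifier; mark it invalid on failure. */
static void notifier_init(int notifier[2])
{
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, notifier))
		notifier[0] = notifier[1] = INVSOCK;
}

/* Wake any thread select()ing on notifier[0]. */
static void notifier_wake(int notifier[2])
{
	const char buf = 0;

	if (notifier[1] != INVSOCK)
		(void)write(notifier[1], &buf, 1);
}

/* Drain one wakeup so select() does not report it again. */
static void notifier_read(int notifier[2])
{
	char buf[1];

	(void)read(notifier[0], buf, sizeof(buf));
}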
Example #11
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	if (thr->work_restart_notifier[1] == -1)
		notifier_init(thr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(!mythr->_last_sbr_state))
				{
					mt_disable_finish(mythr);
					mythr->_last_sbr_state = should_be_running;
				}
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(mythr->_last_sbr_state))
			{
				mythr->_last_sbr_state = should_be_running;
				do_queue_flush(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
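
Both this loop and the async loop below lean on two more deadline helpers, with tv_sec == -1 again meaning "no deadline". Plausible sketches consistent with that convention (illustrative, not the project's code):

#include <stdbool.h>
#include <sys/time.h>

/* Has *tvp_timer already passed at *tvp_now? Unset deadlines never pass. */
static bool timer_passed(const struct timeval *tvp_timer, const struct timeval *tvp_now)
{
	if (tvp_timer->tv_sec == -1)
		return false;
	return !timercmp(tvp_now, tvp_timer, <);
}

/* Lower *tvp_timeout to *tvp_candidate if the candidate deadline is earlier. */
static void reduce_timeout_to(struct timeval *tvp_timeout, const struct timeval *tvp_candidate)
{
	if (tvp_candidate->tv_sec == -1)
		return;
	if (tvp_timeout->tv_sec == -1 || timercmp(tvp_candidate, tvp_timeout, <))
		*tvp_timeout = *tvp_candidate;
}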
Example #12
void minerloop_async(struct thr_info *mythr)
{
	struct thr_info *thr = mythr;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool is_running, should_be_running;
	
	if (mythr->work_restart_notifier[1] == -1)
		notifier_init(mythr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			// Nothing should happen while we're starting a job
			if (unlikely(mythr->busy_state == TBS_STARTING_JOB))
				goto defer_events;
			
			is_running = mythr->work;
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			
			if (should_be_running)
			{
				if (unlikely(!(is_running || mythr->_job_transition_in_progress)))
				{
					mt_disable_finish(mythr);
					goto djp;
				}
				if (unlikely(mythr->work_restart))
					goto djp;
			}
			else  // ! should_be_running
			{
				if (unlikely(is_running && !mythr->_job_transition_in_progress))
				{
disabled: ;
					mythr->tv_morework.tv_sec = -1;
					if (mythr->busy_state != TBS_GETTING_RESULTS)
						do_get_results(mythr, false);
					else
						// Avoid starting job when pending result fetch completes
						mythr->_proceed_with_new_job = false;
				}
			}
			
			if (timer_passed(&mythr->tv_morework, &tv_now))
			{
djp: ;
				if (!do_job_prepare(mythr, &tv_now))
					goto disabled;
			}
			
defer_events:
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_morework);
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
Example #13
// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					struct timespec rgtp;

					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
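
The tv_start/tv_end bracketing around scanhash() is the canonical rate-measurement use of timer_set_now(). What hashes_done() ultimately derives from tv_hashes reduces to something like this simplification (not the actual accounting in the surrounding codebase):

#include <stdint.h>
#include <sys/time.h>

/* Hashes per second over one scanhash() iteration (sketch). */
static double iteration_hashrate(int64_t hashes, const struct timeval *tv_hashes)
{
	double secs = tv_hashes->tv_sec + tv_hashes->tv_usec / 1e6;

	return (secs > 0.0) ? (double)hashes / secs : 0.0;
}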