Example #1
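/* Prepare the next job for an asynchronous device driver: keep hashing the
 * current work while it is still fresh, otherwise request, fetch and prepare
 * new work, then hand off to the driver's job_prepare callback. Returns
 * false if no new work could be obtained. */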
bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now)
{
	struct cgpu_info *proc = mythr->cgpu;
	struct device_drv *api = proc->drv;
	struct timeval tv_worktime;
	
	mythr->tv_morework.tv_sec = -1;
	mythr->_job_transition_in_progress = true;
	if (mythr->work)
		timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime);
	if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes))
	{
		mythr->work_restart = false;
		request_work(mythr);
		// FIXME: Allow get_work to return NULL to retry on notification
		if (mythr->next_work)
			free_work(mythr->next_work);
		mythr->next_work = get_and_prepare_work(mythr);
		if (!mythr->next_work)
			return false;
		mythr->starting_next_work = true;
		api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce);
	}
	else
	{
		mythr->starting_next_work = false;
		api->job_prepare(mythr, mythr->work, mythr->_max_nonce);
	}
	job_prepare_complete(mythr);
	return true;
}
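
Both this loop and minerloop_scanhash in Example #4 stop grinding on a work item once abandon_work() says so. Its definition is not part of this excerpt; the sketch below is a minimal reading of the call sites, where opt_scantime and stale_work() are assumed helper names borrowed from similar miner codebases, not names confirmed by the source.

/* Minimal sketch, not the actual implementation: give up on a work item
 * once it has been scanned longer than the configured scan time, its nonce
 * space is nearly exhausted, or it has gone stale. opt_scantime and
 * stale_work() are assumed. */
static bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t max_hashes)
{
	if (wdiff->tv_sec > opt_scantime)
		return true;
	if ((uint64_t)work->blk.nonce + max_hashes >= 0xffffffffULL)
		return true;
	return stale_work(work, false);
}
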
Example #2
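/* Worker-thread body of a parallel push-relabel max-flow solver: each
 * thread drains its own node queue, pushing excess flow along admissible
 * edges with atomic capacity/excess updates, and lifting (relabeling) a
 * node under its mutex when no push is possible. */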
void* scan_nodes(void* threadid) {
  long tid = (long)threadid;

  unsigned int min_nbr_height;      /* lowest height among admissible neighbors */
  Edge nbr_edge, min_height_edge;
  long d;                           /* amount of excess to push */
  long local_e;                     /* snapshot of cur_node->excess */
  long local_c;                     /* snapshot of the edge's residual capacity */
  int isInQ;
  int node_push_times = 0, node_lift_times = 0;
  int op_times = 0;

  Node* cur_node;
  ThreadEnv* thisThread = threadEnv + tid;

#if 0
  if (tid == 0)
    global_relabel(tid);
#endif

  pthread_barrier_wait(&start_barrier);

  while (!flow_done()) {
    cur_node = thisThread->Q[0];

    while (cur_node != NoNode) {

#ifdef GR
      if (op_times > gr_threshold) {
        op_times = 0;
        if (pthread_mutex_trylock(&gr_mutex) == 0) {
          global_relabel(tid);
          pthread_mutex_unlock(&gr_mutex);
        }
      }
#endif

      while (cur_node->excess > 0) {
#ifdef DEBUG
          fprintf(stderr,
                  "%ld h=%d, e=%ld\n",
                  (long)(cur_node - g_node),
                  cur_node->height,
                  cur_node->excess);
          fflush(stderr);
#endif
        min_nbr_height = UINT_MAX;
        for (nbr_edge = cur_node->adj_list; nbr_edge < (cur_node + 1)->adj_list;
             nbr_edge++) {

          if (nbr_edge->capacity > 0 &&
              (nbr_edge->endpoint)->height < min_nbr_height) {
            min_nbr_height = (nbr_edge->endpoint)->height;
            min_height_edge = nbr_edge;
          }
        }

#ifdef DEBUG
          fprintf(stderr, "work on %d\n", cur_node - g_node);
          fflush(stderr);
#endif
        if (cur_node->height > min_nbr_height) {
          local_e = cur_node->excess;
          local_c = min_height_edge->capacity;

          d = MIN(local_e, local_c);
          if (min_height_edge->endpoint->wave == cur_node->wave &&
              cur_node->height > min_height_edge->endpoint->height) {
            node_push_times++;
            op_times++;

            atomic_add(d, &(min_height_edge->mateedge->capacity));
            atomic_sub(d, &(min_height_edge->capacity));

            atomic_add(d, &((min_height_edge->endpoint)->excess));
            atomic_sub(d, &(cur_node->excess));

#if defined(PUSH) || defined(DEBUG)
              fprintf(stderr,
                      "[%ld] %ld(%ld) -> %ld -> %ld(%ld) \n",
                      tid,
                      cur_node - g_node,
                      cur_node->excess,
                      d,
                      min_height_edge->endpoint - g_node,
                      (min_height_edge->endpoint)->excess);
              fflush(stderr);
#endif
            // add min_nbr to local queue
            isInQ = cmpxchg(&(min_height_edge->endpoint->inQ), 0, tid + 1);
            if (isInQ == 0)
              enQ(thisThread, min_height_edge->endpoint);
          }
        } else {
          // if we cannot push to any nodes, then we must be able to lift
          node_lift_times++;
          op_times++;

          pthread_mutex_lock(&(node_mutex[cur_node - g_node]));
          if (cur_node->height < min_nbr_height + 1)
            cur_node->height = min_nbr_height + 1;
          pthread_mutex_unlock(&(node_mutex[cur_node - g_node]));
#if defined(LIFT) || defined(DEBUG)
            fprintf(stderr,
                    "%ld ^ %d, ref %ld(%d)\n",
                    cur_node - g_node,
                    cur_node->height,
                    min_height_edge->endpoint - g_node,
                    min_height_edge->endpoint->height);
            fflush(stderr);
#endif
        }
      }  // while (cur_node->excess > 0)
      set0(&(cur_node->inQ));

      if (cur_node->excess > 0) {
        isInQ = cmpxchg(&(cur_node->inQ), 0, tid + 1);
        if (isInQ == 0) {
          reenQ(thisThread, cur_node);
        } else {
          deQ(thisThread);
        }
      } else {
        deQ(thisThread);
      }

#ifdef HELP
      if (thisThread->request < MAX_THRD)
        send_work(tid);
#endif

      cur_node = thisThread->Q[thisThread->head];
    }  // while (cur_node != NoNode)
#ifdef HELP
    // Q is empty, find something to do;
    request_work(tid);
#else
    break;
#endif
  }  // while(!flow_done())

  atomic_add(node_push_times, &(totalPushes));
  atomic_add(node_lift_times, &(totalLifts));
}  // scan_nodes
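
scan_nodes() leans on a few atomic helpers (atomic_add, atomic_sub, cmpxchg, set0) that are not shown in this excerpt. A plausible sketch using GCC's legacy __sync builtins follows; the operand types and exact semantics are assumptions, and the real benchmark may define them differently.

/* Sketch of the atomic helpers assumed above, via GCC __sync builtins. */
static inline void atomic_add(long d, long *p) { __sync_fetch_and_add(p, d); }
static inline void atomic_sub(long d, long *p) { __sync_fetch_and_sub(p, d); }
/* Returns the value previously stored at *p: 0 means this thread won the
 * race and claimed the slot. */
static inline int cmpxchg(int *p, int expected, int desired) {
  return __sync_val_compare_and_swap(p, expected, desired);
}
/* Store 0 with release semantics, publishing prior writes. */
static inline void set0(int *p) { __sync_lock_release(p); }
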
Example #3
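/* Miner loop for queue-based drivers: keeps every processor's work queue
 * full, flushes it on work restarts or when the processor is disabled, and
 * sleeps until the earliest pending poll deadline or a notification. */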
void minerloop_queue(struct thr_info *thr)
{
	struct thr_info *mythr;
	struct cgpu_info *cgpu = thr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_now;
	struct timeval tv_timeout;
	struct cgpu_info *proc;
	bool should_be_running;
	struct work *work;
	
	if (thr->work_restart_notifier[1] == -1)
		notifier_init(thr->work_restart_notifier);
	
	while (likely(!cgpu->shutdown)) {
		tv_timeout.tv_sec = -1;
		timer_set_now(&tv_now);
		for (proc = cgpu; proc; proc = proc->next_proc)
		{
			mythr = proc->thr[0];
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
redo:
			if (should_be_running)
			{
				if (unlikely(!mythr->_last_sbr_state))
				{
					mt_disable_finish(mythr);
					mythr->_last_sbr_state = should_be_running;
				}
				
				if (unlikely(mythr->work_restart))
				{
					mythr->work_restart = false;
					do_queue_flush(mythr);
				}
				
				while (!mythr->queue_full)
				{
					if (mythr->next_work)
					{
						work = mythr->next_work;
						mythr->next_work = NULL;
					}
					else
					{
						request_work(mythr);
						// FIXME: Allow get_work to return NULL to retry on notification
						work = get_and_prepare_work(mythr);
					}
					if (!work)
						break;
					if (!api->queue_append(mythr, work))
						mythr->next_work = work;
				}
			}
			else
			if (unlikely(mythr->_last_sbr_state))
			{
				mythr->_last_sbr_state = should_be_running;
				do_queue_flush(mythr);
			}
			
			if (timer_passed(&mythr->tv_poll, &tv_now))
				api->poll(mythr);
			
			should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause);
			if (should_be_running && !mythr->queue_full)
				goto redo;
			
			reduce_timeout_to(&tv_timeout, &mythr->tv_poll);
		}
		
		do_notifier_select(thr, &tv_timeout);
	}
}
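
The per-processor pass above folds each processor's next poll deadline into tv_timeout with reduce_timeout_to(), and do_notifier_select() then sleeps until that deadline or a work-restart notification arrives. reduce_timeout_to() is not shown in this excerpt; a minimal sketch consistent with its use here (tv_sec == -1 marking an unset timer, timercmp() from <sys/time.h>) would be:

/* Minimal sketch: fold *tvp into *tvp_timeout, keeping whichever set
 * deadline comes first; tv_sec == -1 marks an unset timer. */
static void reduce_timeout_to(struct timeval *tvp_timeout, const struct timeval *tvp)
{
	if (tvp->tv_sec == -1)
		return;
	if (tvp_timeout->tv_sec == -1 || timercmp(tvp, tvp_timeout, <))
		*tvp_timeout = *tvp;
}
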
Example #4
// Miner loop to manage a single processor (with possibly multiple threads per processor)
void minerloop_scanhash(struct thr_info *mythr)
{
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *api = cgpu->drv;
	struct timeval tv_start, tv_end;
	struct timeval tv_hashes, tv_worktime;
	uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff;
	int64_t hashes;
	struct work *work;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	
#ifdef HAVE_PTHREAD_CANCEL
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
#endif
	
	while (likely(!cgpu->shutdown)) {
		mythr->work_restart = false;
		request_work(mythr);
		work = get_and_prepare_work(mythr);
		if (!work)
			break;
		timer_set_now(&work->tv_work_start);
		
		do {
			thread_reportin(mythr);
			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
			timer_set_now(&tv_start);
			hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce);
			timer_set_now(&tv_end);
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();
			thread_reportin(mythr);
			
			timersub(&tv_end, &tv_start, &tv_hashes);
			if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL))
				goto disabled;
			
			if (unlikely(mythr->work_restart)) {
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary) {
					/* 250 ms per device thread; split into
					 * sec/nsec since tv_nsec must stay below
					 * one second or nanosleep fails EINVAL */
					const long stagger_ms = 250 * mythr->device_thread;
					struct timespec rgtp;

					rgtp.tv_sec = stagger_ms / 1000;
					rgtp.tv_nsec = (stagger_ms % 1000) * 1000000L;
					nanosleep(&rgtp, NULL);
				}
				break;
			}
			
			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
disabled:
				mt_disable(mythr);
			
			timersub(&tv_end, &work->tv_work_start, &tv_worktime);
		} while (!abandon_work(work, &tv_worktime, cgpu->max_hashes));
		free_work(work);
	}
}
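
The restart stagger works out to 250 ms per device thread: device_thread 1 resumes after 250 ms, thread 2 after 500 ms, and so on (the sec/nsec split keeps tv_nsec below nanosleep's one-second limit), so each device's primary thread grabs fresh work before the extra threads compete for it. Note also the cancellation discipline: cancellation is disabled for the duration of api->scanhash() and re-enabled, with an explicit pthread_testcancel(), as soon as the driver returns.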