Example no. 1
static bool gridseed_full(struct cgpu_info *gridseed)
{
	GRIDSEED_INFO *info = gridseed->device_data;
	struct work *work;
	int subid, slot;
	bool ret = true;

	//applog(LOG_NOTICE, "Entering %s", __FUNCTION__);
	mutex_lock(&info->qlock);
	if (info->needworks <= 0)
		goto out_unlock;

	work = get_queued(gridseed);
	if (unlikely(!work)) {
		ret = false;
		goto out_unlock;
	}
	subid = info->queued++;
	work->subid = subid;
	work->devflag = false; /* set to true once sent to the device */

	if (info->soft_queue_len >= GRIDSEED_SOFT_QUEUE_LEN)
		__gridseed_purge_work_queue(gridseed, info, 1);
	info->workqueue[info->soft_queue_len++] = work;
	info->needworks--;

	ret = (info->needworks <= 0);

out_unlock:
	mutex_unlock(&info->qlock);
	return ret;
}
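
The helper __gridseed_purge_work_queue() called above when the soft queue is full is not shown in this example. A minimal sketch of what such a helper might look like, assuming it simply retires the oldest entries through work_completed() and compacts the array; the real driver code (and any fields it touches beyond workqueue[] and soft_queue_len) may differ:

/* Hypothetical sketch, not the driver's actual implementation: drop the
 * 'count' oldest entries from the soft queue and hand them back to cgminer.
 * Assumes the cgminer driver context (miner.h, string.h) and the
 * info->workqueue[] / info->soft_queue_len fields used in the example above. */
static void __gridseed_purge_work_queue(struct cgpu_info *gridseed,
					GRIDSEED_INFO *info, int count)
{
	int i;

	if (count > info->soft_queue_len)
		count = info->soft_queue_len;

	/* Retire the oldest entries, which sit at the front of the array. */
	for (i = 0; i < count; i++)
		work_completed(gridseed, info->workqueue[i]);

	/* Shift the remaining entries down to close the gap. */
	memmove(info->workqueue, info->workqueue + count,
		(info->soft_queue_len - count) * sizeof(struct work *));
	info->soft_queue_len -= count;
}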
Example no. 2
static int64_t bitfury_scanHash(struct thr_info *thr)
{
	static struct bitfury_device *devices; // TODO: move somewhere more appropriate
	int chip_n;
	int chip;
	uint64_t hashes = 0;
	struct timeval now;
	char line[2048];
	int short_stat = 10;
	static time_t short_out_t;
	int long_stat = 1800;
	static time_t long_out_t;
	int long_long_stat = 60 * 30;
	static time_t long_long_out_t;
	static int first = 0; // TODO: move to detect()
	int i;

	devices = thr->cgpu->devices;
	chip_n = thr->cgpu->chip_n;

	if (!first) {
		for (i = 0; i < chip_n; i++) {
			devices[i].osc6_bits = 54;
		}
		for (i = 0; i < chip_n; i++) {
			send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits);
		}
	}
	first = 1;

	for (chip = 0; chip < chip_n; chip++) {
		devices[chip].job_switched = 0;
		if(!devices[chip].work) {
			devices[chip].work = get_queued(thr->cgpu);
			if (devices[chip].work == NULL) {
				return 0;
			}
			work_to_payload(&(devices[chip].payload), devices[chip].work);
		}
	}

	libbitfury_sendHashData(devices, chip_n);
	nmsleep(5);

	cgtime(&now);
	for (chip = 0; chip < chip_n; chip++) {
		if (devices[chip].job_switched) {
			int i,j;
			int *res = devices[chip].results;
			struct work *work = devices[chip].work;
			struct work *owork = devices[chip].owork;
			struct work *o2work = devices[chip].o2work;
			i = devices[chip].results_n;
			for (j = i - 1; j >= 0; j--) {
				if (owork) {
					submit_nonce(thr, owork, bswap_32(res[j]));
					devices[chip].stat_ts[devices[chip].stat_counter++] =
						now.tv_sec;
					if (devices[chip].stat_counter == BITFURY_STAT_N) {
						devices[chip].stat_counter = 0;
					}
				}
				if (o2work) {
					// TEST
					//submit_nonce(thr, owork, bswap_32(res[j]));
				}
			}
			devices[chip].results_n = 0;
			devices[chip].job_switched = 0;
			if (devices[chip].old_nonce && o2work) {
					submit_nonce(thr, o2work, bswap_32(devices[chip].old_nonce));
					i++;
			}
			if (devices[chip].future_nonce) {
					submit_nonce(thr, work, bswap_32(devices[chip].future_nonce));
					i++;
			}

			if (o2work)
				work_completed(thr->cgpu, o2work);

			devices[chip].o2work = devices[chip].owork;
			devices[chip].owork = devices[chip].work;
			devices[chip].work = NULL;
			hashes += 0xffffffffull * i;
		}
	}

	if (now.tv_sec - short_out_t > short_stat) {
		int shares_first = 0, shares_last = 0, shares_total = 0;
		char stat_lines[32][256] = {0};
		int len, k;
		double gh[32][8] = {0};
		double ghsum = 0, gh1h = 0, gh2h = 0;
		unsigned strange_counter = 0;

		for (chip = 0; chip < chip_n; chip++) {
			int shares_found = calc_stat(devices[chip].stat_ts, short_stat, now);
			double ghash;
			len = strlen(stat_lines[devices[chip].slot]);
			ghash = shares_to_ghashes(shares_found, short_stat);
			gh[devices[chip].slot][chip & 0x07] = ghash;
			snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz);

			if(short_out_t && ghash < 0.5) {
				applog(LOG_WARNING, "Chip_id %d FREQ CHANGE\n", chip);
				send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits - 1);
				nmsleep(1);
				send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits);
			}
			shares_total += shares_found;
			shares_first += chip < 4 ? shares_found : 0;
			shares_last += chip > 3 ? shares_found : 0;
			strange_counter += devices[chip].strange_counter;
			devices[chip].strange_counter = 0;
		}
		sprintf(line, "vvvvwww SHORT stat %ds: wwwvvvv", short_stat);
		applog(LOG_WARNING, line);
		sprintf(line, "stranges: %u", strange_counter);
		applog(LOG_WARNING, line);
		for(i = 0; i < 32; i++)
			if(strlen(stat_lines[i])) {
				len = strlen(stat_lines[i]);
				ghsum = 0;
				gh1h = 0;
				gh2h = 0;
				for(k = 0; k < 4; k++) {
					gh1h += gh[i][k];
					gh2h += gh[i][k+4];
					ghsum += gh[i][k] + gh[i][k+4];
				}
				snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i);
				applog(LOG_WARNING, stat_lines[i]);
			}
		short_out_t = now.tv_sec;
	}

	if (now.tv_sec - long_out_t > long_stat) {
		int shares_first = 0, shares_last = 0, shares_total = 0;
		char stat_lines[32][256] = {0};
		int len, k;
		double gh[32][8] = {0};
		double ghsum = 0, gh1h = 0, gh2h = 0;

		for (chip = 0; chip < chip_n; chip++) {
			int shares_found = calc_stat(devices[chip].stat_ts, long_stat, now);
			double ghash;
			len = strlen(stat_lines[devices[chip].slot]);
			ghash = shares_to_ghashes(shares_found, long_stat);
			gh[devices[chip].slot][chip & 0x07] = ghash;
			snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz);

			shares_total += shares_found;
			shares_first += chip < 4 ? shares_found : 0;
			shares_last += chip > 3 ? shares_found : 0;
		}
		sprintf(line, "!!!_________ LONG stat %ds: ___________!!!", long_stat);
		applog(LOG_WARNING, line);
		for(i = 0; i < 32; i++)
			if(strlen(stat_lines[i])) {
				len = strlen(stat_lines[i]);
				ghsum = 0;
				gh1h = 0;
				gh2h = 0;
				for(k = 0; k < 4; k++) {
					gh1h += gh[i][k];
					gh2h += gh[i][k+4];
					ghsum += gh[i][k] + gh[i][k+4];
				}
				snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i);
				applog(LOG_WARNING, stat_lines[i]);
			}
		long_out_t = now.tv_sec;
	}

	return hashes;
}
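
Both statistics blocks above turn a share count over a time window into a GH/s figure with shares_to_ghashes(). A standalone sketch of that conversion, assuming each difficulty-1 share represents roughly 2^32 hashes; the constant used by the driver's own shares_to_ghashes() may differ slightly:

#include <stdio.h>

/* Hypothetical illustration of the conversion: shares * 2^32 hashes,
 * divided by the window length in seconds, scaled to gigahashes/second. */
static double shares_to_ghashes_sketch(int shares, int seconds)
{
	return ((double)shares * 4294967296.0) / ((double)seconds * 1e9);
}

int main(void)
{
	/* e.g. 6 shares in the 10 s short-stat window is roughly 2.6 GH/s */
	printf("%.2f GH/s\n", shares_to_ghashes_sketch(6, 10));
	return 0;
}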
Example no. 3
static bool spondoolies_queue_full_sp30(struct cgpu_info *cgpu)
{
	struct spond_adapter* a = cgpu->device_data;
#if 0
	static int bla = 0;

	if (!((bla++)%500)) {
		printf("FAKE TEST FLUSH T:%d!\n",usec_stamp());
		a->reset_mg_queue = 3;
	}
#endif
	// Only once every 1/10 second do work.
	bool ret = false, do_sleep = false;
	int next_job_id;
	struct timeval tv;
	struct work *work;
	unsigned int usec;

	mutex_lock(&a->lock);
	assert(a->works_pending_tx <= REQUEST_SIZE);

	gettimeofday(&tv, NULL);

	usec = (tv.tv_sec-last_force_queue.tv_sec) * 1000000;
	usec += (tv.tv_usec-last_force_queue.tv_usec);

	if ((usec >= REQUEST_PERIOD) ||
		(a->reset_mg_queue == 3) || // push flush
		((a->reset_mg_queue == 2)) || // Fast pull
		((a->reset_mg_queue == 1) && (a->works_pending_tx == REQUEST_SIZE))) { // Fast push after flush
			spondoolies_flush_queue(a, (a->reset_mg_queue == 3));
			if (a->reset_mg_queue) {
				//printf("FLUSH(%d) %d T:%d\n",a->reset_mg_queue , a->works_pending_tx, usec_stamp());
				if (a->works_pending_tx || (a->reset_mg_queue == 3)) {
					a->reset_mg_queue--;
				}
			}
			last_force_queue = tv;
	}

	// see if we have enough jobs
	if (a->works_pending_tx == REQUEST_SIZE) {
		ret = true;
		goto return_unlock;
	}

	// see if can take 1 more job.
	// Must be smaller to prevent overflow.
	assert(MAX_JOBS_PENDING_IN_MINERGATE_SP30 < MINERGATE_ADAPTER_QUEUE_SP30);
	next_job_id = (a->current_job_id + 1) % MAX_JOBS_PENDING_IN_MINERGATE_SP30;
	if (a->my_jobs[next_job_id].cgminer_work) {
		ret = true;
		goto return_unlock;
	}
	work = get_queued(cgpu);
	if (unlikely(!work)) {
		do_sleep = true;
		goto return_unlock;
	}

	work->thr = cgpu->thr[0];
	work->thr_id = cgpu->thr[0]->id;
	assert(work->thr);

	a->current_job_id = next_job_id;
	work->subid = a->current_job_id;
	// Get pointer for the request
	a->my_jobs[a->current_job_id].cgminer_work = work;
	a->my_jobs[a->current_job_id].state = SPONDWORK_STATE_IN_BUSY;
	//printf("Push: %d\n", a->current_job_id);

	int max_ntime_roll = (work->drv_rolllimit < MAX_NROLES) ? work->drv_rolllimit : MAX_NROLES;
	minergate_do_job_req_sp30 *pkt_job = &a->mp_next_req->req[a->works_pending_tx];
	fill_minergate_request(pkt_job, work, max_ntime_roll);
	a->works_in_driver++;
	a->works_pending_tx++;
	a->mp_next_req->req_count++;
	a->my_jobs[a->current_job_id].merkle_root = pkt_job->mrkle_root;

return_unlock:
	//printf("D:P.TX:%d inD:%d\n", a->works_pending_tx, a->works_in_driver);
	mutex_unlock(&a->lock);

	if (do_sleep)
		cgsleep_ms(10);

	return ret;
}
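
spondoolies_queue_full_sp30() decides when to force a queue push by measuring the microseconds elapsed since last_force_queue and comparing them with REQUEST_PERIOD. The same arithmetic in isolation, as a hypothetical helper that is not part of the driver:

#include <sys/time.h>

/* Hypothetical helper: microseconds elapsed between two gettimeofday()
 * samples, computed exactly as in the function above. The driver forces a
 * flush once this value reaches REQUEST_PERIOD. */
static long timeval_usec_elapsed(const struct timeval *later,
				 const struct timeval *earlier)
{
	long usec = (later->tv_sec - earlier->tv_sec) * 1000000L;
	usec += later->tv_usec - earlier->tv_usec;
	return usec;
}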
static int64_t bitfury_scanHash(struct thr_info *thr)
{
	static struct bitfury_device *devices; // TODO: move somewhere more appropriate
	int chip_n;
	int chip;
	uint64_t hashes = 0;
	unsigned char line[2048];
	
	char stat_lines[32][256] = {0};
	
	static int first = 0; // TODO: move to detect()
	int i;
	static int shift_number = 1;
	static struct timeval spi_started;
	struct timeval now;
	struct cgpu_info *cgpu = thr->cgpu;
	devices = thr->cgpu->devices;
	chip_n = thr->cgpu->chip_n;

	if (!first) {
		for (i = 0; i < chip_n; i++) {
			devices[i].osc6_bits = 50;
		}        
		set_chip_opts(devices, chip_n);
		for (i = 0; i < chip_n; i++) {
			send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits);
		}
		cgtime(&spi_started);
	}
	first = 1;

	cgtime(&now);
	int wait = 1000000 * (now.tv_sec - spi_started.tv_sec) + now.tv_usec - spi_started.tv_usec;
	if (wait < 800000) {
		//cgsleep_ms((800000-wait)/1000);
		if (restart_wait(thr, (800000 - wait) / 1000) != ETIMEDOUT)
		{
			/* a restart was requested: purge any queued work */
			for (chip = 0; chip < chip_n; chip++)
			{
				if(devices[chip].bfwork.work != NULL)
				{
					work_completed(thr->cgpu, devices[chip].bfwork.work);
				}
				
				devices[chip].bfwork.work = NULL;
				devices[chip].bfwork.results_n = 0;
				devices[chip].bfwork.results_sent = 0;
			}
		}
	}	

	for (chip = 0; chip < chip_n; chip++) {
		devices[chip].job_switched = 0;
		if(!devices[chip].bfwork.work) {
			devices[chip].bfwork.work = get_queued(thr->cgpu);
			if (devices[chip].bfwork.work == NULL) {
				return 0;
			}
			work_to_payload(&(devices[chip].bfwork.payload), devices[chip].bfwork.work);
		}
	}
	cgtime(&spi_started);
	libbitfury_sendHashData(devices, chip_n);

	int high = 0;
	double aveg = 0.0;
	int total = 0;
	int futures = 0;
	for (chip = 0; chip < chip_n; chip++) {

		if (devices[chip].job_switched) {
			int i=0;
			struct work *work = devices[chip].bfwork.work;
			struct work *owork = devices[chip].obfwork.work;
			struct work *o2work = devices[chip].o2bfwork.work;

			if (owork)
				i += submit_work(&devices[chip].obfwork, thr);
			if (o2work)
				i += submit_work(&devices[chip].o2bfwork, thr);
			if (work)
				i += submit_work(&devices[chip].bfwork, thr);

			high = i > high ? i : high;
			total += i;

			devices[chip].job_switched = 0;

			if (o2work)
				work_completed(thr->cgpu, o2work);

			//printf("%d %d %d\n",devices[chip].o2bfwork.results_n,devices[chip].obfwork.results_n,devices[chip].bfwork.results_n);
			
			memcpy(&(devices[chip].o2bfwork), &(devices[chip].obfwork), sizeof(struct bitfury_work));
			memcpy(&(devices[chip].obfwork), &(devices[chip].bfwork), sizeof(struct bitfury_work));
			devices[chip].bfwork.work = NULL;
			devices[chip].bfwork.results_n = 0;
			devices[chip].bfwork.results_sent = 0;
			hashes += 0xffffffffull * i;
		}
		/*
		if(shift_number % 100 == 0)
		{
			int len = strlen(stat_lines[devices[chip].slot]);
			snprintf(stat_lines[devices[chip].slot]+len,256-len,"%d: %d/%d ",chip,devices[chip].nonces_found/devices[chip].nonce_errors);
		}
		*/
		
	}

	aveg = (double) total / chip_n;
	//applog(LOG_WARNING, "high: %d aver: %4.2f total %d futures %d", high, aveg,total,futures);
	if(shift_number % 100 == 0)
	{
		/*

		applog(LOG_WARNING,stat_lines[0]);
		applog(LOG_WARNING,stat_lines[1]);
		applog(LOG_WARNING,stat_lines[2]);
		applog(LOG_WARNING,stat_lines[3]);
		*/
	}

	
	shift_number++;

	return hashes;
}