Example no. 1
0
static void gc3355_set_core_freq(struct cgpu_info *gridseed)
{
	GRIDSEED_INFO *info = (GRIDSEED_INFO*)(gridseed->device_data);
	gc3355_write_data(gridseed, info->freq_cmd, sizeof(info->freq_cmd));
	cgsleep_ms(GRIDSEED_COMMAND_DELAY);
	applog(LOG_NOTICE, "Set GC3355 core frequency to %d MHz", info->freq);
}
Example no. 2
0
static bool gc3355_write_register(struct cgpu_info *gridseed, uint32_t reg_addr,
				  uint32_t reg_value) {
	GRIDSEED_INFO *info = (GRIDSEED_INFO*)(gridseed->device_data);
	char cmd[16] = "\x55\xaa\xc0\x02";
	uint32_t reg_len = 4;
	unsigned char buf[4];

	if (info->fw_version != 0x01140113) {
		applog(LOG_ERR, "Can't write registers; incompatible firmware %08X on %i",
			info->fw_version, gridseed->device_id);
		return false;
	}

	*(uint32_t *)(cmd + 4) = htole32(reg_addr);
	*(uint32_t *)(cmd + 8) = htole32(reg_value);
	*(uint32_t *)(cmd + 12) = htole32(reg_len);
	if (gc3355_write_data(gridseed, cmd, sizeof(cmd)) != 0) {
		applog(LOG_DEBUG, "Failed to write data to %i", gridseed->device_id);
		return false;
	}
	cgsleep_ms(GRIDSEED_COMMAND_DELAY);

	if (gc3355_get_data(gridseed, buf, 4)) {
		applog(LOG_DEBUG, "No response from %i", gridseed->device_id);
		return false;
	}
	return true;
}
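For readability, the 16-byte command assembled above can be pictured as the following packed layout. This is only an illustration derived from the offsets in the code; the struct name and field names are hypothetical and are not part of the driver.

#include <stdint.h>

/* Illustrative view of the GC3355 register-write command built in
 * gc3355_write_register(); names are hypothetical, offsets match the code. */
struct gc3355_reg_write_cmd {
	uint8_t  header[4];    /* 0x55 0xAA 0xC0 0x02 */
	uint32_t reg_addr;     /* bytes  4-7:  little-endian register address */
	uint32_t reg_value;    /* bytes  8-11: little-endian value to write */
	uint32_t reg_len;      /* bytes 12-15: little-endian length, always 4 here */
} __attribute__((packed));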
Example no. 3
0
/* A generic wait function for threads that poll that will wait a specified
 * time tdiff waiting on a work restart request. Returns zero if the condition
 * was met (work restart requested) or ETIMEDOUT if not.
 */
int restart_wait(struct thr_info *thr, unsigned int mstime)
{
	struct timeval tv_timer, tv_now, tv_timeout;
	fd_set rfds;
	SOCKETTYPE wrn = thr->work_restart_notifier[0];
	int rv;
	
	if (unlikely(thr->work_restart_notifier[1] == INVSOCK))
	{
		// This is a bug!
		applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr);
		cgsleep_ms(mstime);
		return (thr->work_restart ? 0 : ETIMEDOUT);
	}
	
	timer_set_now(&tv_now);
	timer_set_delay(&tv_timer, &tv_now, mstime * 1000);
	while (true)
	{
		FD_ZERO(&rfds);
		FD_SET(wrn, &rfds);
		tv_timeout = tv_timer;
		rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now));
		if (rv == 0)
			return ETIMEDOUT;
		if (rv > 0)
		{
			if (thr->work_restart)
				return 0;
			notifier_read(thr->work_restart_notifier);
		}
		timer_set_now(&tv_now);
	}
}
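Example no. 23 later in this listing calls this helper as restart_wait(thr, 250) from inside its poll loop. The fragment below is a minimal sketch of that call pattern only; poll_device() is a hypothetical placeholder, not a real driver function.

/* Sketch of a device poll loop built on restart_wait(). */
while (!thr->work_restart) {
	if (restart_wait(thr, 250) == 0)
		break;                  /* work restart requested: drop this work */
	poll_device(thr->cgpu);         /* timed out: read device status (hypothetical helper) */
}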
Example no. 4
0
static void bitforce_flash_led(struct cgpu_info *bitforce)
{
	int err, amount;

	/* Do not try to flash the led if we're polling for a result to
	 * minimise the chance of interleaved results */
	if (bitforce->polling)
		return;

	/* It is not critical flashing the led so don't get stuck if we
	 * can't grab the mutex now */
	if (mutex_trylock(&bitforce->device_mutex))
		return;

	if ((err = usb_write(bitforce, BITFORCE_FLASH, BITFORCE_FLASH_LEN, &amount, C_REQUESTFLASH)) < 0 || amount != BITFORCE_FLASH_LEN) {
		applog(LOG_ERR, "%s%i: flash request failed (%d:%d)",
			bitforce->drv->name, bitforce->device_id, amount, err);
	} else {
		/* However, this stops anything else getting a reply
		 * So best to delay any other access to the BFL */
		cgsleep_ms(4000);
	}

	/* Once we've tried - don't do it until told to again */
	bitforce->flash_led = false;

	mutex_unlock(&bitforce->device_mutex);

	return; // nothing is returned by the BFL
}
Example no. 5
0
static void gc3355_enable_btc_cores(struct cgpu_info *gridseed, GRIDSEED_INFO *info)
{
	unsigned char cmd[24], c1, c2;
	uint16_t	mask;
	int i;

	mask = 0x00;
	for(i=0; i<info->btcore; i++)
		mask = mask << 1 | 0x01;

	if (mask == 0)
		return;

	c1 = mask & 0x00ff;
	c2 = mask >> 8;

	memset(cmd, 0, sizeof(cmd));
	memcpy(cmd, "\x55\xAA\xEF\x02", 4);
	for(i=4; i<24; i++) {
		cmd[i] = ((i%2)==0) ? c1 : c2;
	}
	gc3355_write_data(gridseed, cmd, sizeof(cmd));
	cgsleep_ms(GRIDSEED_COMMAND_DELAY);
	return;
}
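As a worked example of the mask construction above (assuming a hypothetical info->btcore of 12), the standalone program below reproduces the computation: mask becomes 0x0FFF, so bytes 4-23 of cmd alternate 0xFF at even offsets and 0x0F at odd offsets after the \x55\xAA\xEF\x02 header.

#include <stdint.h>
#include <stdio.h>

/* Standalone check of the BTC-core mask computation; btcore == 12 is
 * an assumed example value, not a driver default. */
int main(void)
{
	int btcore = 12;
	uint16_t mask = 0;
	int i;

	for (i = 0; i < btcore; i++)
		mask = mask << 1 | 0x01;            /* 0x0FFF for btcore == 12 */
	printf("mask=%04x c1=%02x c2=%02x\n",
	       mask, mask & 0x00ff, mask >> 8);  /* c1=ff c2=0f */
	return 0;
}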
Example no. 6
0
static void gc3355_send_cmds_bin(struct cgpu_info *gridseed, const char *cmds[], int size)
{
	int				i;

	for(i=0; ; i++) {
		if (cmds[i] == NULL)
			break;
		gc3355_write_data(gridseed, (char *)cmds[i], size);
		cgsleep_ms(GRIDSEED_COMMAND_DELAY);
	}
	return;
}
Example no. 7
0
static void gc3355_send_cmds(struct cgpu_info *gridseed, const char *cmds[])
{
	unsigned char	ob[512];
	int				i;

	for(i=0; ; i++) {
		if (cmds[i] == NULL)
			break;
		hex2bin(ob, cmds[i], sizeof(ob));
		gc3355_write_data(gridseed, ob, strlen(cmds[i])/2);
		cgsleep_ms(GRIDSEED_COMMAND_DELAY);
	}
}
Example no. 8
0
static bool bitforce_thread_init(struct thr_info *thr)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned int wait;

	/* Pause each new thread at least 100ms between initialising
	 * so the devices aren't making calls all at the same time. */
	wait = thr->id * MAX_START_DELAY_MS;
	applog(LOG_DEBUG, "%s%d: Delaying start by %dms",
			bitforce->drv->name, bitforce->device_id, wait);
	cgsleep_ms(wait);

	return true;
}
Example no. 9
0
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);

	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}

	thread_reportout(mythr);
	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start

	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");

out: ;
	struct cgpu_info *proc = cgpu;
	do
	{
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	}
	while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	cgsleep_ms(1000);
	
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);

	notifier_destroy(mythr->notifier);

	return NULL;
}
Example no. 10
0
static void gc3355_init(struct cgpu_info *gridseed, GRIDSEED_INFO *info)
{
	unsigned char buf[512];
	int amount;

	applog(LOG_NOTICE, "System reseting");
	gc3355_send_cmds(gridseed, str_reset);
	cgsleep_ms(200);
	usb_buffer_clear(gridseed);
	usb_read_timeout(gridseed, buf, sizeof(buf), &amount, 10, C_GETRESULTS);
	gc3355_send_cmds(gridseed, str_init);
	gc3355_send_cmds(gridseed, str_ltc_reset);
	gc3355_set_core_freq(gridseed);
	if (info->voltage)
		gc3355_increase_voltage(gridseed);
}
Example no. 11
0
static int polling(struct thr_info *thr)
{
	int i, tmp;

	struct avalon2_pkg send_pkg;
	struct avalon2_ret ar;

	struct cgpu_info *avalon2 = thr->cgpu;
	struct avalon2_info *info = avalon2->device_data;

	static int pre_led_red[AVA2_DEFAULT_MODULARS];
	for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) {
		if (info->modulars[i] && info->enable[i]) {
			cgsleep_ms(20);
			memset(send_pkg.data, 0, AVA2_P_DATA_LEN);

			tmp = be32toh(info->led_red[i]); /* RED LED */
			memcpy(send_pkg.data + 12, &tmp, 4);

			tmp = be32toh(i); /* ID */
			memcpy(send_pkg.data + 28, &tmp, 4);
			if (info->led_red[i] && mm_cmp_1404(info, i)) {
				avalon2_init_pkg(&send_pkg, AVA2_P_TEST, 1, 1);
				while (avalon2_send_pkg(info->fd, &send_pkg, thr) != AVA2_SEND_OK)
					;
				info->enable[i] = 0;
				continue;
			} else
				avalon2_init_pkg(&send_pkg, AVA2_P_POLLING, 1, 1);

			while (avalon2_send_pkg(info->fd, &send_pkg, thr) != AVA2_SEND_OK)
				;
			avalon2_get_result(thr, info->fd, &ar);
		}
	}

	return 0;
}
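For reference, the payload assembled above places its two fields at fixed offsets inside the (apparently 32-byte) AVA2_P_DATA_LEN data area. The struct below is only an illustration derived from the memcpy calls; the names are hypothetical, and the values are byte-swapped with be32toh() before being copied in.

#include <stdint.h>

/* Illustrative layout of the AVA2_P_POLLING / AVA2_P_TEST payload
 * built in polling(); field names are hypothetical. */
struct ava2_polling_payload {
	uint8_t  pad0[12];      /* zeroed */
	uint32_t led_red;       /* offset 12: LED state */
	uint8_t  pad1[12];      /* zeroed */
	uint32_t module_id;     /* offset 28: module index */
};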
Example no. 12
0
static void gc3355_init(struct cgpu_info *gridseed, GRIDSEED_INFO *info, bool set_nonce)
{
	unsigned char buf[512];
	int amount;

	applog(LOG_NOTICE, "System reseting");
	gc3355_send_cmds(gridseed, str_reset);
	cgsleep_ms(200);
	usb_buffer_clear(gridseed);
	usb_read_timeout(gridseed, buf, sizeof(buf), &amount, 10, C_GETRESULTS);
	gc3355_send_cmds(gridseed, str_init);
	gc3355_send_cmds(gridseed, str_ltc_reset);
	gc3355_set_core_freq(gridseed);
	if (set_nonce)
		gc3355_set_init_nonce(gridseed);
	//gc3355_send_cmds(gridseed, str_baud);
	//gc3355_send_cmds(gridseed, str_enable_btc_cores);
	gc3355_enable_btc_cores(gridseed, info);
	if (info->usefifo == 0)
		gc3355_send_cmds(gridseed, str_nofifo);
	gridseed_request_ltc_task(gridseed, info);
	return;
}
Example no. 13
0
static struct work *wq_dequeue(struct T1_chain *t1, bool sig)
{
	struct work_ent *we;
	struct work *work = NULL;
	struct work_queue *wq = &t1->active_wq;

	if (wq == NULL)
		return NULL;

	/* Sleep only a small duration if there is no work queued in case it's
	 * still refilling rather than we have no upstream work. */
	if (unlikely(!wq->num_elems && sig))
		cgsleep_ms(10);

	mutex_lock(&t1->lock);
	if (likely(wq->num_elems > 0)) {
		we = list_entry(wq->head.next, struct work_ent, head);
		work = we->work;

		list_del(&we->head);
		free(we);
		wq->num_elems--;
	}
	mutex_unlock(&t1->lock);

	return work;
}
Example no. 14
0
static int avalon2_send_pkg(int fd, const struct avalon2_pkg *pkg,
			    struct thr_info __maybe_unused *thr)
{
	int ret;
	uint8_t buf[AVA2_WRITE_SIZE];
	size_t nr_len = AVA2_WRITE_SIZE;

	memcpy(buf, pkg, AVA2_WRITE_SIZE);
	if (opt_debug) {
		applog(LOG_DEBUG, "Avalon2: Sent(%ld):", nr_len);
		hexdump((uint8_t *)buf, nr_len);
	}

	ret = write(fd, buf, nr_len);
	if (unlikely(ret != nr_len)) {
		applog(LOG_DEBUG, "Avalon2: Send(%d)!", ret);
		return AVA2_SEND_ERROR;
	}

	cgsleep_ms(20);
#if 0
	ret = avalon2_gets(fd, result);
	if (ret != AVA2_GETS_OK) {
		applog(LOG_DEBUG, "Avalon2: Get(%d)!", ret);
		return AVA2_SEND_ERROR;
	}

	ret = decode_pkg(thr, &ar, result);
	if (ret != AVA2_P_ACK) {
		applog(LOG_DEBUG, "Avalon2: PKG(%d)!", ret);
		hexdump((uint8_t *)result, AVA2_READ_SIZE);
		return AVA2_SEND_ERROR;
	}
#endif

	return AVA2_SEND_OK;
}
Example no. 15
0
static int hashratio_send_pkg(int fd, const struct hashratio_pkg *pkg,
			    struct thr_info __maybe_unused *thr)
{
	int ret;
	uint8_t buf[HRTO_WRITE_SIZE];
	int nr_len = HRTO_WRITE_SIZE;

	memcpy(buf, pkg, HRTO_WRITE_SIZE);
//	if (opt_debug) {
//		applog(LOG_DEBUG, "hashratio: Sent(%d):", nr_len);
//		hexdump((uint8_t *)buf, nr_len);
//	}

	ret = write(fd, buf, nr_len);
	if (unlikely(ret != nr_len)) {
		applog(LOG_DEBUG, "hashratio: Send(%d)!", ret);
		return HRTO_SEND_ERROR;
	}

	cgsleep_ms(20);
#if 0
	ret = hashratio_gets(fd, result);
	if (ret != HRTO_GETS_OK) {
		applog(LOG_DEBUG, "hashratio: Get(%d)!", ret);
		return HRTO_SEND_ERROR;
	}

	ret = decode_pkg(thr, &ar, result);
	if (ret != HRTO_P_ACK) {
		applog(LOG_DEBUG, "hashratio: PKG(%d)!", ret);
		hexdump((uint8_t *)result, HRTO_READ_SIZE);
		return HRTO_SEND_ERROR;
	}
#endif

	return HRTO_SEND_OK;
}
Example no. 16
0
static void hashratio_stratum_pkgs(struct cgpu_info *hashratio, struct pool *pool)
{
	const int merkle_offset = 36;
	struct hashratio_pkg pkg;
	int i, a, b, tmp;
	unsigned char target[32];
	int job_id_len;
	unsigned short crc;

	/* Send out the first stratum message STATIC */
	applog(LOG_DEBUG, "hashratio: Pool stratum message STATIC: %d, %d, %d, %d, %d, %d",
	       pool->coinbase_len,
	       pool->nonce2_offset,
	       pool->n2size,
	       merkle_offset,
	       pool->merkles,
		   pool->pool_no);
	memset(pkg.data, 0, HRTO_P_DATA_LEN);
	tmp = be32toh(pool->coinbase_len);
	memcpy(pkg.data, &tmp, 4);

	tmp = be32toh(pool->nonce2_offset);
	memcpy(pkg.data + 4, &tmp, 4);

	tmp = be32toh(pool->n2size);
	memcpy(pkg.data + 8, &tmp, 4);

	tmp = be32toh(merkle_offset);
	memcpy(pkg.data + 12, &tmp, 4);

	tmp = be32toh(pool->merkles);
	memcpy(pkg.data + 16, &tmp, 4);

	tmp = be32toh((int)pool->sdiff);
	memcpy(pkg.data + 20, &tmp, 4);

	tmp = be32toh((int)pool->pool_no);
	memcpy(pkg.data + 24, &tmp, 4);

	hashratio_init_pkg(&pkg, HRTO_P_STATIC, 1, 1);
	if (hashratio_send_pkgs(hashratio, &pkg))
		return;

	set_target(target, pool->sdiff);
	memcpy(pkg.data, target, 32);
	if (opt_debug) {
		char *target_str;
		target_str = bin2hex(target, 32);
		applog(LOG_DEBUG, "hashratio: Pool stratum target: %s", target_str);
		free(target_str);
	}
	hashratio_init_pkg(&pkg, HRTO_P_TARGET, 1, 1);
	if (hashratio_send_pkgs(hashratio, &pkg))
		return;

	applog(LOG_DEBUG, "hashratio: Pool stratum message JOBS_ID: %s",
	       pool->swork.job_id);
	memset(pkg.data, 0, HRTO_P_DATA_LEN);

	job_id_len = strlen(pool->swork.job_id);
	crc = crc16((const unsigned char *)pool->swork.job_id, job_id_len);
	pkg.data[0] = (crc & 0xff00) >> 8;
	pkg.data[1] = crc & 0x00ff;
	hashratio_init_pkg(&pkg, HRTO_P_JOB_ID, 1, 1);
	if (hashratio_send_pkgs(hashratio, &pkg))
		return;

	a = pool->coinbase_len / HRTO_P_DATA_LEN;
	b = pool->coinbase_len % HRTO_P_DATA_LEN;
	applog(LOG_DEBUG, "pool->coinbase_len: %d", pool->coinbase_len);
	applog(LOG_DEBUG, "hashratio: Pool stratum message COINBASE: %d %d", a, b);
	for (i = 0; i < a; i++) {
		memcpy(pkg.data, pool->coinbase + i * 32, 32);
		hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, a + (b ? 1 : 0));
		if (hashratio_send_pkgs(hashratio, &pkg))
			return;
		if (i % 25 == 0) {
			cgsleep_ms(2);
		}
	}
	if (b) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->coinbase + i * 32, b);
		hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, i + 1);
		if (hashratio_send_pkgs(hashratio, &pkg))
			return;
	}

	b = pool->merkles;
	applog(LOG_DEBUG, "hashratio: Pool stratum message MERKLES: %d", b);
	for (i = 0; i < b; i++) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->swork.merkle_bin[i], 32);
		hashratio_init_pkg(&pkg, HRTO_P_MERKLES, i + 1, b);
		if (hashratio_send_pkgs(hashratio, &pkg))
			return;
	}

	applog(LOG_DEBUG, "hashratio: Pool stratum message HEADER: 4");
	for (i = 0; i < 4; i++) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->header_bin + i * 32, 32);
		hashratio_init_pkg(&pkg, HRTO_P_HEADER, i + 1, 4);
		if (hashratio_send_pkgs(hashratio, &pkg))
			return;

	}
}
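The first HRTO_P_STATIC packet sent above packs seven 32-bit fields at fixed offsets. The struct below is only an illustration derived from the memcpy calls; the names are hypothetical, and every value is byte-swapped with be32toh() before being copied in.

#include <stdint.h>

/* Illustrative layout of the HRTO_P_STATIC payload built in
 * hashratio_stratum_pkgs(); field names are hypothetical. */
struct hrto_static_payload {
	uint32_t coinbase_len;    /* offset  0 */
	uint32_t nonce2_offset;   /* offset  4 */
	uint32_t n2size;          /* offset  8 */
	uint32_t merkle_offset;   /* offset 12: always 36 here */
	uint32_t merkles;         /* offset 16 */
	uint32_t diff;            /* offset 20: pool->sdiff truncated to int */
	uint32_t pool_no;         /* offset 24 */
};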
Example no. 17
0
static bool spondoolies_queue_full_sp30(struct cgpu_info *cgpu)
{
	struct spond_adapter* a = cgpu->device_data;
#if 0
	static int bla = 0;

	if (!((bla++)%500)) {
		printf("FAKE TEST FLUSH T:%d!\n",usec_stamp());
		a->reset_mg_queue = 3;
	}
#endif
	// Only once every 1/10 second do work.
	bool ret = false, do_sleep = false;
	int next_job_id;
	struct timeval tv;
	struct work *work;
	unsigned int usec;

	mutex_lock(&a->lock);
	assert(a->works_pending_tx <= REQUEST_SIZE);

	gettimeofday(&tv, NULL);

	usec = (tv.tv_sec-last_force_queue.tv_sec) * 1000000;
	usec += (tv.tv_usec-last_force_queue.tv_usec);

	if ((usec >= REQUEST_PERIOD) ||
		(a->reset_mg_queue == 3) || // push flush
		((a->reset_mg_queue == 2)) || // Fast pull
		((a->reset_mg_queue == 1) && (a->works_pending_tx == REQUEST_SIZE))) { // Fast push after flush
			spondoolies_flush_queue(a, (a->reset_mg_queue == 3));
			if (a->reset_mg_queue) {
				//printf("FLUSH(%d) %d T:%d\n",a->reset_mg_queue , a->works_pending_tx, usec_stamp());
				if (a->works_pending_tx || (a->reset_mg_queue == 3)) {
					a->reset_mg_queue--;
				}
			}
			last_force_queue = tv;
	}

	// see if we have enough jobs
	if (a->works_pending_tx == REQUEST_SIZE) {
		ret = true;
		goto return_unlock;
	}

	// see if can take 1 more job.
	// Must be smaller to prevent overflow.
	assert(MAX_JOBS_PENDING_IN_MINERGATE_SP30 < MINERGATE_ADAPTER_QUEUE_SP30);
	next_job_id = (a->current_job_id + 1) % MAX_JOBS_PENDING_IN_MINERGATE_SP30;
	if (a->my_jobs[next_job_id].cgminer_work) {
		ret = true;
		goto return_unlock;
	}
	work = get_queued(cgpu);
	if (unlikely(!work)) {
		do_sleep = true;
		goto return_unlock;
	}

	work->thr = cgpu->thr[0];
	work->thr_id = cgpu->thr[0]->id;
	assert(work->thr);

	a->current_job_id = next_job_id;
	work->subid = a->current_job_id;
	// Get pointer for the request
	a->my_jobs[a->current_job_id].cgminer_work = work;
	a->my_jobs[a->current_job_id].state = SPONDWORK_STATE_IN_BUSY;
	//printf("Push: %d\n", a->current_job_id);

	int max_ntime_roll = (work->drv_rolllimit < MAX_NROLES) ? work->drv_rolllimit : MAX_NROLES;
	minergate_do_job_req_sp30* pkt_job =  &a->mp_next_req->req[a->works_pending_tx];
	fill_minergate_request(pkt_job, work, max_ntime_roll);
	a->works_in_driver++;
	a->works_pending_tx++;
	a->mp_next_req->req_count++;
	a->my_jobs[a->current_job_id].merkle_root = pkt_job->mrkle_root;

return_unlock:
	//printf("D:P.TX:%d inD:%d\n", a->works_pending_tx, a->works_in_driver);
	mutex_unlock(&a->lock);

	if (do_sleep)
		cgsleep_ms(10);

	return ret;
}
Example no. 18
0
static bool bitforce_send_work(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned char ob[70];
	char buf[BITFORCE_BUFSIZ+1];
	int err, amount;
	char *s;
	char *cmd;
	int len;

re_send:
	if (bitforce->nonce_range) {
		cmd = BITFORCE_SENDRANGE;
		len = BITFORCE_SENDRANGE_LEN;
	} else {
		cmd = BITFORCE_SENDWORK;
		len = BITFORCE_SENDWORK_LEN;
	}

	mutex_lock(&bitforce->device_mutex);
	if ((err = usb_write(bitforce, cmd, len, &amount, C_REQUESTSENDWORK)) < 0 || amount != len) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%i: request send work failed (%d:%d)",
				bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_REQUESTSENDWORKSTATUS)) < 0) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%d: read request send work status failed (%d:%d)",
				bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	if (amount == 0 || !buf[0] || !strncasecmp(buf, "B", 1)) {
		mutex_unlock(&bitforce->device_mutex);
		cgsleep_ms(WORK_CHECK_INTERVAL_MS);
		goto re_send;
	} else if (unlikely(strncasecmp(buf, "OK", 2))) {
		mutex_unlock(&bitforce->device_mutex);
		if (bitforce->nonce_range) {
			applog(LOG_WARNING, "%s%i: Does not support nonce range, disabling",
						bitforce->drv->name, bitforce->device_id);
			bitforce->nonce_range = false;
			bitforce->sleep_ms *= 5;
			bitforce->kname = KNAME_WORK;
			goto re_send;
		}
		applog(LOG_ERR, "%s%i: Error: Send work reports: %s",
				bitforce->drv->name, bitforce->device_id, buf);
		return false;
	}

	sprintf((char *)ob, ">>>>>>>>");
	memcpy(ob + 8, work->midstate, 32);
	memcpy(ob + 8 + 32, work->data + 64, 12);
	if (!bitforce->nonce_range) {
		sprintf((char *)ob + 8 + 32 + 12, ">>>>>>>>");
		work->blk.nonce = bitforce->nonces = 0xffffffff;
		len = 60;
	} else {
		uint32_t *nonce;

		nonce = (uint32_t *)(ob + 8 + 32 + 12);
		*nonce = htobe32(work->blk.nonce);
		nonce = (uint32_t *)(ob + 8 + 32 + 12 + 4);
		/* Split work up into 1/5th nonce ranges */
		bitforce->nonces = 0x33333332;
		*nonce = htobe32(work->blk.nonce + bitforce->nonces);
		work->blk.nonce += bitforce->nonces + 1;
		sprintf((char *)ob + 8 + 32 + 12 + 8, ">>>>>>>>");
		len = 68;
	}

	if ((err = usb_write(bitforce, (char *)ob, len, &amount, C_SENDWORK)) < 0 || amount != len) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%i: send work failed (%d:%d)",
				bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_SENDWORKSTATUS)) < 0) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%d: read send work status failed (%d:%d)",
				bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	mutex_unlock(&bitforce->device_mutex);

	if (opt_debug) {
		s = bin2hex(ob + 8, 44);
		applog(LOG_DEBUG, "%s%i: block data: %s",
				bitforce->drv->name, bitforce->device_id, s);
		free(s);
	}

	if (amount == 0 || !buf[0]) {
		applog(LOG_ERR, "%s%i: Error: Send block data returned empty string/timed out",
				bitforce->drv->name, bitforce->device_id);
		return false;
	}

	if (unlikely(strncasecmp(buf, "OK", 2))) {
		applog(LOG_ERR, "%s%i: Error: Send block data reports: %s",
				bitforce->drv->name, bitforce->device_id, buf);
		return false;
	}

	cgtime(&bitforce->work_start_tv);
	return true;
}
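The "1/5th nonce ranges" comment above corresponds to the constant 0x33333332: each submission asks the device to search [nonce, nonce + 0x33333332], i.e. one fifth of the 32-bit nonce space. A standalone check of that arithmetic (not driver code):

#include <stdint.h>
#include <stdio.h>

/* 0x33333332 + 1 == 0x33333333 == 0xffffffff / 5, so each range holds
 * exactly one fifth of the 32-bit nonce space. */
int main(void)
{
	uint32_t per_range = 0x33333332u + 1;
	printf("per_range=%u fifth=%u\n",
	       (unsigned)per_range, (unsigned)(UINT32_MAX / 5));
	return 0;
}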
Example no. 19
0
static bool bitforce_detect_one(struct libusb_device *dev, struct usb_find_devices *found)
{
	char buf[BITFORCE_BUFSIZ+1];
	int err, amount;
	char *s;
	struct timeval init_start, init_now;
	int init_sleep, init_count;
	bool ident_first;

	struct cgpu_info *bitforce = usb_alloc_cgpu(&bitforce_drv, 1);

	if (!usb_init(bitforce, dev, found))
		goto shin;

	// Allow 2 complete attempts if the 1st time returns an unrecognised reply
	ident_first = true;
retry:
	init_count = 0;
	init_sleep = REINIT_TIME_FIRST_MS;
	cgtime(&init_start);
reinit:
	bitforce_initialise(bitforce, false);
	if ((err = usb_write(bitforce, BITFORCE_IDENTIFY, BITFORCE_IDENTIFY_LEN, &amount, C_REQUESTIDENTIFY)) < 0 || amount != BITFORCE_IDENTIFY_LEN) {
		applog(LOG_ERR, "%s detect (%s) send identify request failed (%d:%d)",
			bitforce->drv->dname, bitforce->device_path, amount, err);
		goto unshin;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETIDENTIFY)) < 0 || amount < 1) {
		init_count++;
		cgtime(&init_now);
		if (us_tdiff(&init_now, &init_start) <= REINIT_TIME_MAX) {
			if (init_count == 2) {
				applog(LOG_WARNING, "%s detect (%s) 2nd init failed (%d:%d) - retrying",
					bitforce->drv->dname, bitforce->device_path, amount, err);
			}
			cgsleep_ms(init_sleep);
			if ((init_sleep * 2) <= REINIT_TIME_MAX_MS)
				init_sleep *= 2;
			goto reinit;
		}

		if (init_count > 0)
			applog(LOG_WARNING, "%s detect (%s) init failed %d times %.2fs",
				bitforce->drv->dname, bitforce->device_path, init_count, tdiff(&init_now, &init_start));

		if (err < 0) {
			applog(LOG_ERR, "%s detect (%s) error identify reply (%d:%d)",
				bitforce->drv->dname, bitforce->device_path, amount, err);
		} else {
			applog(LOG_ERR, "%s detect (%s) empty identify reply (%d)",
				bitforce->drv->dname, bitforce->device_path, amount);
		}

		goto unshin;
	}
	buf[amount] = '\0';

	if (unlikely(!strstr(buf, "SHA256"))) {
		if (ident_first) {
			applog(LOG_WARNING, "%s detect (%s) didn't recognise '%s' trying again ...",
				bitforce->drv->dname, bitforce->device_path, buf);
			ident_first = false;
			goto retry;
		}
		applog(LOG_ERR, "%s detect (%s) didn't recognise '%s' on 2nd attempt",
			bitforce->drv->dname, bitforce->device_path, buf);
		goto unshin;
	}

	if (strstr(buf, "SHA256 SC")) {
#ifdef USE_BFLSC
		applog(LOG_DEBUG, "SC device detected, will defer to BFLSC driver");
#else
		applog(LOG_WARNING, "SC device detected but no BFLSC support compiled in!");
#endif
		goto unshin;
	}

	if (likely((!memcmp(buf, ">>>ID: ", 7)) && (s = strstr(buf + 3, ">>>")))) {
		s[0] = '\0';
		bitforce->name = strdup(buf + 7);
	} else {
		bitforce->name = (char *)blank;
	}

	// We have a real BitForce!
	applog(LOG_DEBUG, "%s (%s) identified as: '%s'",
		bitforce->drv->dname, bitforce->device_path, bitforce->name);

	/* Initially enable support for nonce range and disable it later if it
	 * fails */
	if (opt_bfl_noncerange) {
		bitforce->nonce_range = true;
		bitforce->sleep_ms = BITFORCE_SLEEP_MS;
		bitforce->kname = KNAME_RANGE;
	} else {
		bitforce->sleep_ms = BITFORCE_SLEEP_MS * 5;
		bitforce->kname = KNAME_WORK;
	}

	if (!add_cgpu(bitforce))
		goto unshin;

	update_usb_stats(bitforce);

	mutex_init(&bitforce->device_mutex);

	return true;

unshin:

	usb_uninit(bitforce);

shin:

	if (bitforce->name != blank) {
		free(bitforce->name);
		bitforce->name = NULL;
	}

	bitforce = usb_free_cgpu(bitforce);

	return false;
}
Example no. 20
0
static int64_t bitforce_get_result(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned int delay_time_ms;
	struct timeval elapsed;
	struct timeval now;
	char buf[BITFORCE_BUFSIZ+1];
	int amount;
	char *pnoncebuf;
	uint32_t nonce;

	while (1) {
		if (unlikely(thr->work_restart))
			return 0;

		mutex_lock(&bitforce->device_mutex);
		usb_write(bitforce, BITFORCE_WORKSTATUS, BITFORCE_WORKSTATUS_LEN, &amount, C_REQUESTWORKSTATUS);
		usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETWORKSTATUS);
		mutex_unlock(&bitforce->device_mutex);

		cgtime(&now);
		timersub(&now, &bitforce->work_start_tv, &elapsed);

		if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) {
			applog(LOG_ERR, "%s%i: took %ldms - longer than %dms",
				bitforce->drv->name, bitforce->device_id,
				tv_to_ms(elapsed), BITFORCE_LONG_TIMEOUT_MS);
			return 0;
		}

		if (amount > 0 && buf[0] && strncasecmp(buf, "B", 1)) /* BFL does not respond during throttling */
			break;

		/* if BFL is throttling, no point checking so quickly */
		delay_time_ms = (buf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS);
		cgsleep_ms(delay_time_ms);
		bitforce->wait_ms += delay_time_ms;
	}

	if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) {
		applog(LOG_ERR, "%s%i: took %ldms - longer than %dms",
			bitforce->drv->name, bitforce->device_id,
			tv_to_ms(elapsed), BITFORCE_TIMEOUT_MS);
		dev_error(bitforce, REASON_DEV_OVER_HEAT);

		/* Only return if we got nothing after timeout - there still may be results */
		if (amount == 0)
			return 0;
	} else if (!strncasecmp(buf, BITFORCE_EITHER, BITFORCE_EITHER_LEN)) {
		/* Simple timing adjustment. Allow a few polls to cope with
		 * OS timer delays being variably reliable. wait_ms will
		 * always equal sleep_ms when we've waited greater than or
		 * equal to the result return time.*/
		delay_time_ms = bitforce->sleep_ms;

		if (bitforce->wait_ms > bitforce->sleep_ms + (WORK_CHECK_INTERVAL_MS * 2))
			bitforce->sleep_ms += (bitforce->wait_ms - bitforce->sleep_ms) / 2;
		else if (bitforce->wait_ms == bitforce->sleep_ms) {
			if (bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS;
			else if (bitforce->sleep_ms > BITFORCE_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= BITFORCE_CHECK_INTERVAL_MS;
		}

		if (delay_time_ms != bitforce->sleep_ms)
			  applog(LOG_DEBUG, "%s%i: Wait time changed to: %d, waited %u",
					bitforce->drv->name, bitforce->device_id,
					bitforce->sleep_ms, bitforce->wait_ms);

		/* Work out the average time taken. Float for calculation, uint for display */
		bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT;
		bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5);
	}

	applog(LOG_DEBUG, "%s%i: waited %dms until %s",
			bitforce->drv->name, bitforce->device_id,
			bitforce->wait_ms, buf);
	if (!strncasecmp(buf, BITFORCE_NO_NONCE, BITFORCE_NO_NONCE_MATCH))
		return bitforce->nonces;   /* No valid nonce found */
	else if (!strncasecmp(buf, BITFORCE_IDLE, BITFORCE_IDLE_MATCH))
		return 0;	/* Device idle */
	else if (strncasecmp(buf, BITFORCE_NONCE, BITFORCE_NONCE_LEN)) {
		bitforce->hw_errors++;
		applog(LOG_WARNING, "%s%i: Error: Get result reports: %s",
			bitforce->drv->name, bitforce->device_id, buf);
		bitforce_initialise(bitforce, true);
		return 0;
	}

	pnoncebuf = &buf[12];

	while (1) {
		hex2bin((void*)&nonce, pnoncebuf, 4);
#ifndef __BIG_ENDIAN__
		nonce = swab32(nonce);
#endif
		if (unlikely(bitforce->nonce_range && (nonce >= work->blk.nonce ||
			(work->blk.nonce > 0 && nonce < work->blk.nonce - bitforce->nonces - 1)))) {
				applog(LOG_WARNING, "%s%i: Disabling broken nonce range support",
					bitforce->drv->name, bitforce->device_id);
				bitforce->nonce_range = false;
				work->blk.nonce = 0xffffffff;
				bitforce->sleep_ms *= 5;
				bitforce->kname = KNAME_WORK;
		}
			
		submit_nonce(thr, work, nonce);
		if (strncmp(&pnoncebuf[8], ",", 1))
			break;
		pnoncebuf += 9;
	}

	return bitforce->nonces;
}
Example no. 21
0
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work,
                              __maybe_unused int64_t max_nonce)
{
	struct libztex_device *ztex;
	unsigned char sendbuf[44];
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	memcpy(sendbuf, work->data + 64, 12);
	memcpy(sendbuf + 12, work->midstate, 32);

	ztex_selectFpga(ztex);
	i = libztex_sendHashData(ztex, sendbuf);
	if (i < 0) {
		// Something wrong happened in send
		applog(LOG_ERR, "%s: Failed to send hash data with err %d, retrying", ztex->repr, i);
		cgsleep_ms(500);
		i = libztex_sendHashData(ztex, sendbuf);
		if (i < 0) {
			// And there's nothing we can do about it
			ztex_disable(thr);
			applog(LOG_ERR, "%s: Failed to send hash data with err %d, giving up", ztex->repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

	applog(LOG_DEBUG, "%s: sent hashdata", ztex->repr);

	lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%s: failed to allocate lastnonce[%d]", ztex->repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%s: failed to allocate backlog[%d]", ztex->repr, backlog_max);
		free(lastnonce);
		return -1;
	}

	overflow = false;
	int count = 0;
	int validNonces = 0;
	double errorCount = 0;

	applog(LOG_DEBUG, "%s: entering poll loop", ztex->repr);
	while (!(overflow || thr->work_restart)) {
		count++;

		int sleepcount = 0;
		while (thr->work_restart == 0 && sleepcount < 25) {
			cgsleep_ms(10);
			sleepcount += 1;
		}

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		ztex_selectFpga(ztex);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			// Something wrong happened in read
			applog(LOG_ERR, "%s: Failed to read hash data with err %d, retrying", ztex->repr, i);
			cgsleep_ms(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				// And there's nothing we can do about it
				ztex_disable(thr);
				applog(LOG_ERR, "%s: Failed to read hash data with err %d, giving up", ztex->repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		ztex->errorCount[ztex->freqM] *= 0.995;
		ztex->errorWeight[ztex->freqM] = ztex->errorWeight[ztex->freqM] * 0.995 + 1.0;

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			if (nonce > noncecnt)
				noncecnt = nonce;
			// KRAMBLE don't overflow if nonce == 0 (eg on lockup)
			if ( (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) && nonce ) {
				applog(LOG_DEBUG, "%s: overflow nonce=%08x lastnonce=%08x", ztex->repr, nonce, lastnonce[i]);
				overflow = true;
			} else
				lastnonce[i] = nonce;

			if (ztex_checkNonce(work, nonce) != (hdata[i].hash7)) {
				applog(LOG_WARNING, "%s: checkNonce failed for %08X", ztex->repr, nonce);

				// do not count errors in the first 500ms after sendHashData (2x250 wait time)
				if (count > 2) {
					thr->cgpu->hw_errors++;
					errorCount += (1.0 / ztex->numNonces);
				}
			}
			else
				validNonces++;


			for (j=0; j<=ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];

				if (nonce == ztex->offsNonces) {
					continue;
				}

				// precheck the extraSolutions since they often fail
				if (j > 0 && ztex_checkNonce(work, nonce) != 0) {
					continue;
				}

				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					applog(LOG_DEBUG, "%s: Share found N%dE%d", ztex->repr, i, j);
					backlog[backlog_p++] = nonce;

					if (backlog_p >= backlog_max)
						backlog_p = 0;

					work->blk.nonce = 0xffffffff;
					submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%s: submitted %08x", ztex->repr, nonce);
				}
			}
		}
	}

	// only add the errorCount if we had at least some valid nonces or
	// had no valid nonces in the last round
	if (errorCount > 0.0) {
		if (ztex->nonceCheckValid > 0 && validNonces == 0) {
			applog(LOG_ERR, "%s: resetting %.1f errors", ztex->repr, errorCount);
		}
		else {
			ztex->errorCount[ztex->freqM] += errorCount;
		}
	}

	// remember the number of valid nonces for the check in the next round
	ztex->nonceCheckValid = validNonces;

	ztex->errorRate[ztex->freqM] = ztex->errorCount[ztex->freqM] / ztex->errorWeight[ztex->freqM] *
		(ztex->errorWeight[ztex->freqM] < 100 ? ztex->errorWeight[ztex->freqM] * 0.01 : 1.0);
	if (ztex->errorRate[ztex->freqM] > ztex->maxErrorRate[ztex->freqM])
		ztex->maxErrorRate[ztex->freqM] = ztex->errorRate[ztex->freqM];

	if (!ztex_updateFreq(ztex)) {
		// Something really serious happened, so mark this thread as dead!
		free(lastnonce);
		free(backlog);
		
		return -1;
	}

	applog(LOG_DEBUG, "%s: exit %1.8X", ztex->repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}
Example no. 22
0
static bool gridseed_check_new_task(struct cgpu_info *gridseed, GRIDSEED_INFO *info)
{
	cgtimer_t ts_now, ts_res;
	bool ret = false;

	cgtimer_time(&ts_now);
	mutex_lock(&info->qlock);
	cgtimer_sub(&ts_now, &info->query_ts, &ts_res);
#ifndef WIN32
	if (ts_res.tv_sec > 0 || ts_res.tv_nsec > 350000000) {
#else
	if (ts_res.QuadPart > 3500000) {
#endif
		info->query_qlen = false;
		info->dev_queue_len = 1;
		info->needworks = 1;
		cgtimer_time(&info->query_ts);
	}
	mutex_unlock(&info->qlock);
	return ret;
}

/*
 * Thread to read response from Miner device
 */
static void *gridseed_get_results(void *userdata)
{
	struct cgpu_info *gridseed = (struct cgpu_info *)userdata;
	GRIDSEED_INFO *info = gridseed->device_data;
	struct thr_info *thr = info->thr;
	char threadname[24];
	unsigned char readbuf[GRIDSEED_READBUF_SIZE];
	int offset = 0, ret;

	snprintf(threadname, sizeof(threadname), "GridSeed_Recv/%d", gridseed->device_id);
	RenameThread(threadname);
	applog(LOG_NOTICE, "GridSeed: recv thread running, %s", threadname);

	while(likely(!gridseed->shutdown)) {
		unsigned char buf[GRIDSEED_READ_SIZE];

		if (offset >= GRIDSEED_READ_SIZE)
			gridseed_parse_results(gridseed, info, thr, readbuf, &offset);

		if (unlikely(offset + GRIDSEED_READ_SIZE >= GRIDSEED_READBUF_SIZE)) {
			applog(LOG_ERR, "Read buffer overflow, resetting %d", gridseed->device_id);
			offset = 0;
		}

		ret = gc3355_get_data(gridseed, buf, sizeof(buf));
		if (ret == LIBUSB_ERROR_NO_DEVICE)
			gridseed->shutdown = true;
		if (unlikely(ret != 0))
			continue;

		if (opt_debug) {
			applog(LOG_DEBUG, "GridSeed: get %d bytes", GRIDSEED_READ_SIZE);
			hexdump((uint8_t *)buf, GRIDSEED_READ_SIZE);
		}

		memcpy(readbuf + offset, buf, GRIDSEED_READ_SIZE);
		offset += GRIDSEED_READ_SIZE;
	}
	return NULL;
}

/*
 * Thread to send task and queue length query command to device
 */
static void *gridseed_send_command(void *userdata)
{
	struct cgpu_info *gridseed = (struct cgpu_info *)userdata;
	GRIDSEED_INFO *info = gridseed->device_data;
	char threadname[24];
	int i;

	snprintf(threadname, sizeof(threadname), "GridSeed_Send/%d", gridseed->device_id);
	RenameThread(threadname);
	applog(LOG_NOTICE, "GridSeed: send thread running, %s", threadname);

	while(likely(!gridseed->shutdown)) {
		cgsleep_ms(10);
		if (info->usefifo == 0) {
			/* mark the first work in queue as complete after several ms */
			if (gridseed_check_new_task(gridseed, info))
				continue;
		} else {
			/* send query command to device */
			if (gridseed_send_query_cmd(gridseed, info))
				continue;
		}
		/* send task to device */
		mutex_lock(&info->qlock);
		for(i=0; i<info->soft_queue_len; i++) {
			if (info->workqueue[i] && info->workqueue[i]->devflag == false) {
				if (gridseed_send_task(gridseed, info, info->workqueue[i])) {
					info->workqueue[i]->devflag = true;
					break;
				}
			}
		}
		mutex_unlock(&info->qlock);
		/* recv LTC task and send to device */
		gridseed_recv_ltc(gridseed, info);
	}
	return NULL;
}

/*========== functions for struct device_drv ===========*/

static void gridseed_detect(bool __maybe_unused hotplug)
{
	usb_detect(&gridseed_drv, gridseed_detect_one);
}
Example no. 23
0
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work,
                              __maybe_unused int64_t max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct libztex_device *ztex;
	unsigned char sendbuf[44];
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	memcpy(sendbuf, work->data + 64, 12);
	memcpy(sendbuf + 12, work->midstate, 32);

	ztex_selectFpga(ztex, cgpu->proc_id);
	i = libztex_sendHashData(ztex, sendbuf);
	if (i < 0) {
		// Something wrong happened in send
		applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, retrying", cgpu->proc_repr, i);
		cgsleep_ms(500);
		i = libztex_sendHashData(ztex, sendbuf);
		if (i < 0) {
			// And there's nothing we can do about it
			ztex_disable(thr);
			applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, giving up", cgpu->proc_repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

	applog(LOG_DEBUG, "%"PRIpreprv": sent hashdata", cgpu->proc_repr);

	lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate lastnonce[%d]", cgpu->proc_repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate backlog[%d]", cgpu->proc_repr, backlog_max);
		free(lastnonce);
		return -1;
	}

	overflow = false;
	int count = 0;

	applog(LOG_DEBUG, "%"PRIpreprv": entering poll loop", cgpu->proc_repr);
	while (!(overflow || thr->work_restart)) {
		count++;
		if (!restart_wait(thr, 250))
		{
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}
		ztex_selectFpga(ztex, cgpu->proc_id);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			// Something wrong happened in read
			applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, retrying", cgpu->proc_repr, i);
			cgsleep_ms(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				// And there's nothing we can do about it
				ztex_disable(thr);
				applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, giving up", cgpu->proc_repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}

		dclk_gotNonces(&ztex->dclk);

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			if (nonce > noncecnt)
				noncecnt = nonce;
			if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) {
				applog(LOG_DEBUG, "%"PRIpreprv": overflow nonce=%08x lastnonce=%08x", cgpu->proc_repr, nonce, lastnonce[i]);
				overflow = true;
			} else
				lastnonce[i] = nonce;

			if (!ztex_checkNonce(cgpu, work, &hdata[i])) {
				// do not count errors in the first 500ms after sendHashData (2x250 wait time)
				if (count > 2)
					dclk_errorCount(&ztex->dclk, 1.0 / ztex->numNonces);

				inc_hw_errors_only(thr);
			}

			for (j=0; j<=ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];

				if (nonce == ztex->offsNonces) {
					continue;
				}

				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					backlog[backlog_p++] = nonce;

					if (backlog_p >= backlog_max)
						backlog_p = 0;

					work->blk.nonce = 0xffffffff;
					if (!j || test_nonce(work, nonce, false))
						submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%"PRIpreprv": submitted %08x (from N%dE%d)", cgpu->proc_repr, nonce, i, j);
				}
			}
		}
	}

	dclk_preUpdate(&ztex->dclk);

	if (!ztex_updateFreq(thr)) {
		// Something really serious happened, so mark this thread as dead!
		free(lastnonce);
		free(backlog);
		
		return -1;
	}

	applog(LOG_DEBUG, "%"PRIpreprv": exit %1.8X", cgpu->proc_repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}
Example no. 24
0
static int hashratio_stratum_pkgs(int fd, struct pool *pool, struct thr_info *thr)
{
	const int merkle_offset = 36;
	struct hashratio_pkg pkg;
	int i, a, b, tmp;
	unsigned char target[32];
	int job_id_len;

	/* Send out the first stratum message STATIC */
	applog(LOG_DEBUG, "hashratio: Pool stratum message STATIC: %d, %d, %d, %d, %d, %d",
	       pool->coinbase_len,
	       pool->nonce2_offset,
	       pool->n2size,
	       merkle_offset,
	       pool->merkles,
		   pool->pool_no);
	memset(pkg.data, 0, HRTO_P_DATA_LEN);
	tmp = be32toh(pool->coinbase_len);
	memcpy(pkg.data, &tmp, 4);

	tmp = be32toh(pool->nonce2_offset);
	memcpy(pkg.data + 4, &tmp, 4);

	tmp = be32toh(pool->n2size);
	memcpy(pkg.data + 8, &tmp, 4);

	tmp = be32toh(merkle_offset);
	memcpy(pkg.data + 12, &tmp, 4);

	tmp = be32toh(pool->merkles);
	memcpy(pkg.data + 16, &tmp, 4);

	tmp = be32toh((int)pool->swork.diff);
	memcpy(pkg.data + 20, &tmp, 4);

	tmp = be32toh((int)pool->pool_no);
	memcpy(pkg.data + 24, &tmp, 4);

	hashratio_init_pkg(&pkg, HRTO_P_STATIC, 1, 1);
	while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
		;

	set_target(target, pool->swork.diff);
	memcpy(pkg.data, target, 32);
	if (opt_debug) {
		char *target_str;
		target_str = bin2hex(target, 32);
		applog(LOG_DEBUG, "hashratio: Pool stratum target: %s", target_str);
		free(target_str);
	}
	hashratio_init_pkg(&pkg, HRTO_P_TARGET, 1, 1);
	while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
		;


	applog(LOG_DEBUG, "hashratio: Pool stratum message JOBS_ID: %s",
	       pool->swork.job_id);
	memset(pkg.data, 0, HRTO_P_DATA_LEN);

	job_id_len = strlen(pool->swork.job_id);
	job_id_len = job_id_len >= 4 ? 4 : job_id_len;
	for (i = 0; i < job_id_len; i++) {
		pkg.data[i] = *(pool->swork.job_id + strlen(pool->swork.job_id) - job_id_len + i);
	}
	hashratio_init_pkg(&pkg, HRTO_P_JOB_ID, 1, 1);
	while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
		;

	a = pool->coinbase_len / HRTO_P_DATA_LEN;
	b = pool->coinbase_len % HRTO_P_DATA_LEN;
	applog(LOG_DEBUG, "pool->coinbase_len: %d", pool->coinbase_len);
	applog(LOG_DEBUG, "hashratio: Pool stratum message COINBASE: %d %d", a, b);
	for (i = 0; i < a; i++) {
		memcpy(pkg.data, pool->coinbase + i * 32, 32);
		hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, a + (b ? 1 : 0));
		while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
			;
		if (i % 25 == 0) {
			cgsleep_ms(2);
		}
	}
	if (b) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->coinbase + i * 32, b);
		hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, i + 1);
		while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
			;
	}

	b = pool->merkles;
	applog(LOG_DEBUG, "hashratio: Pool stratum message MERKLES: %d", b);
	for (i = 0; i < b; i++) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->swork.merkle_bin[i], 32);
		hashratio_init_pkg(&pkg, HRTO_P_MERKLES, i + 1, b);
		while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
			;
	}

	applog(LOG_DEBUG, "hashratio: Pool stratum message HEADER: 4");
	for (i = 0; i < 4; i++) {
		memset(pkg.data, 0, HRTO_P_DATA_LEN);
		memcpy(pkg.data, pool->header_bin + i * 32, 32);
		hashratio_init_pkg(&pkg, HRTO_P_HEADER, i + 1, 4);
		while (hashratio_send_pkg(fd, &pkg, thr) != HRTO_SEND_OK)
			;

	}
	return 0;
}