/*
 * Thread-pool smoke test: submit one two-argument job (f1) to thread 0 and
 * three jobs sharing one argument vector (f2) to thread 1, shut the pool
 * down, wait for completion, then print the result written through f2_args[2].
 */
int test_main(void){
	int x = 3, limit;
	int y = -5;
	void *f1_args[3];
	void *f2_args[3];
	int values[100];
	int idx, result;
	work_t *job1, *job2, *job3, *job4;
	int nb_threads = get_nb_threads();

	printf("nb_threads= %d\n", nb_threads);

	/* First job: two scalar arguments for f1. */
	f1_args[0] = &x;
	f1_args[1] = &y;
	job1 = create_work(2, f1_args, f1);

	/* Shared input table and output slot for the three f2 jobs. */
	for (idx = 0; idx < 100; idx++)
		values[idx] = idx;
	limit = 100;
	f2_args[0] = &limit;
	f2_args[1] = values;
	f2_args[2] = &result;
	job2 = create_work(3, f2_args, f2);
	job3 = create_work(4, f2_args, f2);
	job4 = create_work(5, f2_args, f2);

	submit_work(job1, 0);
	submit_work(job2, 1);
	submit_work(job3, 1);
	submit_work(job4, 1);

	terminate_thread_pool();

	wait_work_completion(job1);
	wait_work_completion(job2);
	wait_work_completion(job3);
	wait_work_completion(job4);

	printf("res=%d\n", result);

	destroy_work(job1);
	destroy_work(job2);
	destroy_work(job3);
	destroy_work(job4);
	return 0;
}
int main() { dispatch_queue_t q[PRIORITIES]; int i; #if USE_SET_TARGET_QUEUE test_start("Dispatch Priority (Set Target Queue)"); for(i = 0; i < PRIORITIES; i++) { q[i] = dispatch_queue_create(labels[i], NULL); test_ptr_notnull("q[i]", q[i]); assert(q[i]); dispatch_set_target_queue(as_do(q[i]), dispatch_get_global_queue(priorities[i], 0)); dispatch_queue_set_width(q[i], DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS); } #else test_start("Dispatch Priority"); for(i = 0; i < PRIORITIES; i++) { q[i] = dispatch_get_global_queue(priorities[i], 0); } #endif for(i = 0; i < PRIORITIES; i++) { submit_work(q[i], &counts[i].count); } dispatch_main(); }
/*
 * Tear down the global thread pool: ask every worker to stop, join them,
 * release their per-thread synchronization objects, and free the pool.
 * No-op when the pool was never created (pool == NULL).
 */
void terminate_thread_pool(){
	int id;
	int *ret=NULL;
	work_t work;
	if(pool){
		/* A work item whose task is NULL is the shutdown sentinel: one is
		 * submitted to each worker so every thread sees it and exits.
		 * NOTE(review): `work` is a single stack object shared by all
		 * workers -- safe only because the join loop below keeps this
		 * frame alive until every thread has consumed it. */
		work.task=NULL;
		for (id=0;id<pool->nb_threads;id++){
			submit_work(&work,id);
		}
		for (id=0;id<pool->nb_threads;id++){
			/* Collect the worker's heap-allocated return value and free it,
			 * then destroy that worker's condvar and list lock. */
			pthread_join(pool->thread_list[id],(void **) &ret);
			FREE(ret);
			pthread_cond_destroy(pool->cond_var +id);
			pthread_mutex_destroy(pool->list_lock +id);
			/* A non-empty working list here means work was left behind. */
			if (pool->working_list[id].next != NULL)
				if(verbose_level >= WARNING)
					printf("Working list of thread %d not empty!\n",id);
		}
		hwloc_topology_destroy(pool->topology);
		FREE(pool -> thread_list);
		FREE(pool -> working_list);
		FREE(pool -> cond_var);
		FREE(pool -> list_lock);
		FREE(pool -> local);
		FREE(pool);
		/* Reset the global so a later re-init (or second call) is safe. */
		pool = NULL;
	}
}
static void *miner_thread(void *thr_id_int) { int thr_id = (unsigned long) thr_id_int; int failures = 0; uint32_t max_nonce = 0xffffff, max_nonce2; CURL *curl; if (opt_randomize) { srandom(time(0)); } curl = curl_easy_init(); if (!curl) { fprintf(stderr, "CURL initialization failed\n"); return NULL; } while (1) { struct work work __attribute__((aligned(128))); unsigned long hashes_done; struct timeval tv_start, tv_end, diff; bool rc; /* obtain new work from bitcoin */ if (!get_work(curl, &work)) { fprintf(stderr, "json_rpc_call failed, "); if ((opt_retries >= 0) && (++failures > opt_retries)) { fprintf(stderr, "terminating thread\n"); return NULL; /* exit thread */ } /* pause, then restart work loop */ fprintf(stderr, "retry after %d seconds\n", opt_fail_pause); sleep(opt_fail_pause); continue; } if (!validate_midstate(work.data, work.midstate)) { printf("SERVER PROBLEM: work.midstate does not equal SHA256 state after first 64-byte chunk\n"); } hashes_done = 0; gettimeofday(&tv_start, NULL); if (opt_randomize) { max_nonce2 = max_nonce*(1.0 + (double)random()/(RAND_MAX+1.0) - 0.5); } else { max_nonce2 = max_nonce; } /* scan nonces for a proof-of-work hash */ switch (opt_algo) { case ALGO_C: rc = scanhash_c(work.midstate, work.data + 64, work.hash1, work.hash, work.target, max_nonce2, &hashes_done); break; #ifdef WANT_SSE2_4WAY case ALGO_4WAY: { unsigned int rc4 = ScanHash_4WaySSE2(work.midstate, work.data + 64, work.hash1, work.hash, work.target, max_nonce2, &hashes_done); rc = (rc4 == -1) ? 
false : true; } break; #endif #ifdef WANT_VIA_PADLOCK case ALGO_VIA: rc = scanhash_via(work.data, work.target, max_nonce2, &hashes_done); break; #endif case ALGO_CRYPTOPP: rc = scanhash_cryptopp(work.midstate, work.data + 64, work.hash1, work.hash, work.target, max_nonce2, &hashes_done); break; #ifdef WANT_CRYPTOPP_ASM32 case ALGO_CRYPTOPP_ASM32: rc = scanhash_asm32(work.midstate, work.data + 64, work.hash1, work.hash, work.target, max_nonce2, &hashes_done); break; #endif default: /* should never happen */ return NULL; } /* record scanhash elapsed time */ gettimeofday(&tv_end, NULL); timeval_subtract(&diff, &tv_end, &tv_start); hashmeter(thr_id, &diff, hashes_done); /* adjust max_nonce to meet target scan time */ if (diff.tv_sec > (opt_scantime * 2)) max_nonce /= 2; /* large decrease */ else if ((diff.tv_sec > opt_scantime) && (max_nonce > 1500000)) max_nonce -= 1000000; /* small decrease */ else if ((diff.tv_sec < opt_scantime) && (max_nonce < 0xffffec76)) max_nonce += 100000; /* small increase */ /* if nonce found, submit work */ if (rc) submit_work(curl, &work); failures = 0; } curl_easy_cleanup(curl); return NULL; }
static void *miner_thread(void *userdata) { struct thr_info *mythr = userdata; int thr_id = mythr->id; uint32_t max_nonce = 0xffffff; /* Set worker threads to nice 19 and then preferentially to SCHED_IDLE * and if that fails, then SCHED_BATCH. No need for this to be an * error if it fails */ setpriority(PRIO_PROCESS, 0, 19); drop_policy(); /* Cpu affinity only makes sense if the number of threads is a multiple * of the number of CPUs */ if (!(opt_n_threads % num_processors)) affine_to_cpu(mythr->id, mythr->id % num_processors); while (1) { struct work work __attribute__((aligned(128))); uint64_t hashes_done; struct timeval tv_start, tv_end, diff; uint64_t max64; bool rc; /* obtain new work from internal workio thread */ if (unlikely(!get_work(mythr, &work))) { applog(LOG_ERR, "work retrieval failed, exiting " "mining thread %d", mythr->id); goto out; } hashes_done = 0; gettimeofday(&tv_start, NULL); rc = scanhash(thr_id, work.data, work.target, max_nonce, &hashes_done); /* record scanhash elapsed time */ gettimeofday(&tv_end, NULL); timeval_subtract(&diff, &tv_end, &tv_start); hashmeter(thr_id, &diff, hashes_done); /* adjust max_nonce to meet target scan time */ if (diff.tv_usec > 500000) diff.tv_sec++; if (diff.tv_sec > 0) { max64 = (hashes_done / 65536 * opt_scantime) / diff.tv_sec; if (max64 > 0xfffffffaULL) max64 = 0xfffffffaULL; max_nonce = max64; } /* if nonce found, submit work */ if (rc && !submit_work(mythr, &work)) break; } out: tq_freeze(mythr->q); return NULL; }
static int64_t bitfury_scanHash(struct thr_info *thr) { static struct bitfury_device *devices; // TODO Move somewhere to appropriate place int chip_n; int chip; uint64_t hashes = 0; unsigned char line[2048]; char stat_lines[32][256] = {0}; static first = 0; //TODO Move to detect() int i; static int shift_number = 1; static struct timeval spi_started; struct timeval now; struct cgpu_info *cgpu = thr->cgpu; devices = thr->cgpu->devices; chip_n = thr->cgpu->chip_n; if (!first) { for (i = 0; i < chip_n; i++) { devices[i].osc6_bits = 50; } set_chip_opts(devices, chip_n); for (i = 0; i < chip_n; i++) { send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits); } cgtime(&spi_started); } first = 1; cgtime(&now); int wait=1000000*(now.tv_sec-spi_started.tv_sec)+now.tv_usec-spi_started.tv_usec; if(wait<800000){ //cgsleep_ms((800000-wait)/1000); if(restart_wait(thr, (800000-wait)/1000) != ETIMEDOUT) { //purge work for (;chip < chip_n; chip++) { if(devices[chip].bfwork.work != NULL) { work_completed(thr->cgpu, devices[chip].bfwork.work); } devices[chip].bfwork.work = NULL; devices[chip].bfwork.results_n = 0; devices[chip].bfwork.results_sent = 0; } } } for (chip = 0; chip < chip_n; chip++) { devices[chip].job_switched = 0; if(!devices[chip].bfwork.work) { devices[chip].bfwork.work = get_queued(thr->cgpu); if (devices[chip].bfwork.work == NULL) { return 0; } work_to_payload(&(devices[chip].bfwork.payload), devices[chip].bfwork.work); } } cgtime(&spi_started); libbitfury_sendHashData(devices, chip_n); chip = 0; int high = 0; double aveg = 0.0; int total = 0; int futures =0; for (;chip < chip_n; chip++) { if (devices[chip].job_switched) { int i=0; struct work *work = devices[chip].bfwork.work; struct work *owork = devices[chip].obfwork.work; struct work *o2work = devices[chip].o2bfwork.work; if (owork) i+=submit_work(&devices[chip].obfwork, thr); if (o2work) i+=submit_work(&devices[chip].o2bfwork, thr); if (work) i+=submit_work(&devices[chip].bfwork, thr); high = high 
> i?high:i; total+=i; devices[chip].job_switched = 0; if (o2work) work_completed(thr->cgpu, o2work); //printf("%d %d %d\n",devices[chip].o2bfwork.results_n,devices[chip].obfwork.results_n,devices[chip].bfwork.results_n); memcpy (&(devices[chip].o2bfwork),&(devices[chip].obfwork),sizeof(struct bitfury_work)); memcpy (&(devices[chip].obfwork),&(devices[chip].bfwork),sizeof(struct bitfury_work)); devices[chip].bfwork.work = NULL; devices[chip].bfwork.results_n = 0; devices[chip].bfwork.results_sent = 0; hashes += 0xffffffffull * i; } /* if(shift_number % 100 == 0) { int len = strlen(stat_lines[devices[chip].slot]); snprintf(stat_lines[devices[chip].slot]+len,256-len,"%d: %d/%d ",chip,devices[chip].nonces_found/devices[chip].nonce_errors); } */ } aveg = (double) total / chip_n; //applog(LOG_WARNING, "high: %d aver: %4.2f total %d futures %d", high, aveg,total,futures); if(shift_number % 100 == 0) { /* applog(LOG_WARNING,stat_lines[0]); applog(LOG_WARNING,stat_lines[1]); applog(LOG_WARNING,stat_lines[2]); applog(LOG_WARNING,stat_lines[3]); */ } shift_number++; return hashes; }