int bitcoin_parallel(const unsigned int processcount)
{
    printf("\n\nStarting bitcoin_parallel\n");

    // Start, end time
    unsigned long start, end;
    start = current_time_millis();

    // Create a Blockheader object and fill it with the initial data using the getWork method
    Blockheader *b_header = malloc(sizeof(Blockheader));
    getWork(b_header);

    // Split the calculation of the hashes into several segments based on the processcount
    int hashes_per_process = MAX_HASHES / processcount;
    for (int i = 0; i < processcount; i++) {
        Blockheader *temp = malloc(sizeof(Blockheader));
        memcpy(temp, b_header, sizeof(Blockheader));
        // TODO: offset temp->nonce by i * hashes_per_process before handing
        // this copy to a worker; the offset is not applied yet in this stub.
        free(temp); // avoid leaking the per-segment copy in the meantime
    }

    // TODO: Spawn a process for each segment
    // TODO: If a hash has the appropriate difficulty print it on the console using print_hash
    // TODO: Wait until all children finish before exiting

    end = current_time_millis();
    printf("Calculation finished after %.3fs\n", (double)(end - start) / 1000);
    free(b_header);
    return EXIT_FAILURE; // still a stub: the TODOs above are unimplemented
}
int bitcoin_parallel(const unsigned int processcount)
{
    printf("\n\nStarting bitcoin_parallel\n");

    // Start, end time
    unsigned long start, end;
    start = current_time_millis();

    // Create a Blockheader object and fill it with the initial data using the getWork method
    Blockheader *blockheader = malloc(sizeof(Blockheader));
    getWork(blockheader);

    // Split the calculation of the hashes into several segments based on the processcount
    int segment_size = ceil((double)MAX_HASHES / processcount);
    unsigned long starting_nonce = toulong(blockheader->nonce);

    // Spawn a process for each segment
    int pos = 0;
    pid_t *child_pids = malloc(sizeof(pid_t) * processcount);
    for (int i = 0; i < processcount; i++)
        child_pids[i] = 0;
    for (; pos < processcount; pos++) {
        child_pids[pos] = fork();
        if (child_pids[pos] < 0) {
            perror("fork failed");
            return EXIT_FAILURE;
        }
        if (child_pids[pos] == 0)
            break; // child: stop forking; pos is this child's segment index
    }

    if (pos < processcount) {
        // Child: offset the starting nonce by this segment's index, then hash.
        starting_nonce += pos * segment_size;
        char *n = to_reversed_char_arr(starting_nonce);
        memcpy(&(blockheader->nonce), n, sizeof(char) * 4);
        calculate_hash(blockheader, segment_size);
        return EXIT_SUCCESS;
    }

    // Parent: wait until all children finish before exiting.
    int status;
    for (int i = 0; i < processcount; i++)
        wait(&status);

    end = current_time_millis();
    printf("Calculation finished after %.3fs\n", (double)(end - start) / 1000);
    free(blockheader);
    free(child_pids);
    return EXIT_SUCCESS;
}
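/* bitcoin_parallel above leans on two helpers, toulong() and
 * to_reversed_char_arr(), to convert between the header's reversed 4-byte
 * nonce and an unsigned long. A minimal sketch, inferred from the inline
 * conversion that bitcoin_simple performs below; the exact signatures and
 * the malloc'd return value are assumptions, not taken from the source: */
unsigned long toulong(const char nonce[4])
{
    char n[4];
    memcpy(n, nonce, 4);
    byte_reversal(n, 4); /* header bytes are stored in reverse order */
    return (unsigned long)(unsigned char)n[0] << 24 |
           (unsigned long)(unsigned char)n[1] << 16 |
           (unsigned long)(unsigned char)n[2] << 8 |
           (unsigned long)(unsigned char)n[3];
}

char *to_reversed_char_arr(unsigned long nonce)
{
    char *n = malloc(4); /* caller owns the buffer (assumed) */
    n[0] = nonce >> 24;
    n[1] = nonce >> 16;
    n[2] = nonce >> 8;
    n[3] = nonce;
    byte_reversal(n, 4); /* back into the header's reversed byte order */
    return n;
}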
/* Calculates Blockhashes in a simple loop. */
int bitcoin_simple()
{
    printf("Starting bitcoin_simple\n");

    // Start, end time
    unsigned long start, end;
    start = current_time_millis();

    // Creates and retrieves the Block-Header information.
    Blockheader *blockheader = malloc(sizeof(Blockheader));
    // getWork fills an empty Blockheader struct with all the necessary
    // information needed to calculate the hash of a block.
    getWork(blockheader);

    // The nonce is the value that is incremented in each run to get a different hash value.
    char *n = malloc(sizeof(char) * 4);
    memcpy(n, &(blockheader->nonce), sizeof(char) * 4);
    // The values in the Blockheader are actually in reverse byte order and
    // need to be reversed in order to increment the nonce value.
    byte_reversal(n, sizeof(char) * 4);
    // Convert the 4 bytes of raw data into an unsigned long
    // (casting each byte avoids sign extension of high-bit values).
    unsigned long starting_nonce = (unsigned long)(unsigned char)n[0] << 24 |
                                   (unsigned long)(unsigned char)n[1] << 16 |
                                   (unsigned long)(unsigned char)n[2] << 8 |
                                   (unsigned long)(unsigned char)n[3];

    // The nonce we received from getWork is the actual starting nonce. We start
    // calculating hashes with this initial nonce and increase it by one per run.
    unsigned long nonce = starting_nonce;
    char *hash;
    // In practice it is very hard to find a valid hash, so in this exercise we
    // limit the number of hashes we calculate.
    for (; nonce <= (starting_nonce + MAX_HASHES); nonce++) {
        // Put the current nonce into the blockheader:
        // first, shift the long back into a char[4] ...
        n[0] = nonce >> 24;
        n[1] = nonce >> 16;
        n[2] = nonce >> 8;
        n[3] = nonce;
        // ... then reverse the byte order ...
        byte_reversal(n, sizeof(char) * 4);
        // ... and store it in the blockheader.
        blockheader->nonce[0] = n[0];
        blockheader->nonce[1] = n[1];
        blockheader->nonce[2] = n[2];
        blockheader->nonce[3] = n[3];

        // Calculate the hash using the SHA-256 hashing algorithm.
        size_t size = getData(blockheader, &hash);
        size = sha256_digest(hash, size, &hash);
        // To calculate a valid hash, we need to do two hashing passes.
        size = sha256_digest(hash, size, &hash);

        if (check_hash(hash, (int)size)) {
            printf("%ld : ", nonce);
            print_hash(hash, size);
        }
    }

    end = current_time_millis();
    printf("Calculation finished after %.3fs\n", (double)(end - start) / 1000);
    free(n);
    free(blockheader);
    return EXIT_SUCCESS;
}
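/* check_hash() decides whether a double-SHA-256 digest meets the exercise's
 * difficulty target. A minimal sketch, assuming the common convention of
 * requiring a fixed number of leading zero bytes; DIFFICULTY_BYTES and this
 * exact rule are assumptions, not taken from the original source: */
#define DIFFICULTY_BYTES 2
int check_hash(const char *hash, int size)
{
    for (int i = 0; i < DIFFICULTY_BYTES && i < size; i++) {
        if (hash[i] != 0)
            return 0; /* a non-zero leading byte misses the target */
    }
    return 1;
}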
void *work_routine(void *arg)
{
    Worker *mine = (Worker *)arg;
    Crew *crew = mine->crew;
    int status;

    // Wait until the crew has work before entering the main loop.
    status = pthread_mutex_lock(&crew->mutex);
    if (status != 0) {
        perror("mutex lock");
        exit(EXIT_FAILURE);
    }
    while (crew->work_count == 0) {
        status = pthread_cond_wait(&crew->go, &crew->mutex);
        if (status != 0) {
            perror("cond wait go");
            exit(EXIT_FAILURE);
        }
    }
    status = pthread_mutex_unlock(&crew->mutex);
    if (status != 0) {
        perror("thread mutex unlock");
        exit(EXIT_FAILURE);
    }

    Work *work;
    while (1) {
        status = getWork(crew, &work);
        if (status != 0) {
            perror("getWork");
            exit(EXIT_FAILURE);
        }
        doWork(crew, work);

        status = pthread_mutex_lock(&crew->mutex);
        if (status != 0) {
            perror("mutex lock");
            exit(EXIT_FAILURE);
        }
        // Once the queue has drained, wake any threads waiting for completion.
        // This must be a one-shot check: looping here would broadcast forever
        // while holding the mutex, since nothing can change work_count.
        if (crew->work_count <= 0) {
            status = pthread_cond_broadcast(&crew->done);
            if (status != 0) {
                perror("pthread_cond_broadcast done");
                exit(EXIT_FAILURE);
            }
        }
        status = pthread_mutex_unlock(&crew->mutex);
        if (status != 0) {
            perror("unlock mutex");
            exit(EXIT_FAILURE);
        }
    }
    return NULL;
}
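/* work_routine() above dereferences a Crew carrying a mutex, two condition
 * variables, and a pending-work counter, plus a Worker with a back-pointer.
 * A minimal sketch of the shape the code assumes; the field set is inferred
 * from the accesses, and anything beyond that is an assumption: */
typedef struct Crew {
    pthread_mutex_t mutex;   /* guards work_count and the work list */
    pthread_cond_t  go;      /* signaled when work arrives */
    pthread_cond_t  done;    /* broadcast when the queue drains */
    int             work_count;
    Work           *first;   /* head of the pending work list (assumed) */
} Crew;

typedef struct Worker {
    pthread_t    thread;
    struct Crew *crew;       /* back-pointer used by work_routine */
} Worker;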
//=============================================================================
// METHOD: SPELLthreadWorker::run()
//=============================================================================
void SPELLthreadWorker::run()
{
    while (true) {
        m_newWork.wait();
        m_newWork.clear();
        if (!isShutdown()) {
            getWork()->doWork();
        }
        deleteWork();
    }
}
VCard::AddressLabel QtVCardAddressLabelField::getAddressLabel() const
{
    VCard::AddressLabel addressLabel;
    addressLabel.isPreferred = getPreferred();
    addressLabel.isHome = getHome();
    addressLabel.isWork = getWork();
    addressLabel.deliveryType = domesticRadioButton->isChecked()
        ? VCard::DomesticDelivery
        : (internationalRadioButton->isChecked() ? VCard::InternationalDelivery
                                                 : VCard::None);
    addressLabel.isPostal = getTagComboBox()->isTagSet("postal");
    addressLabel.isParcel = getTagComboBox()->isTagSet("parcel");
    std::string lines = Q2PSTRING(addressLabelPlainTextEdit->toPlainText());
    boost::split(addressLabel.lines, lines, boost::is_any_of("\n"));
    return addressLabel;
}
VCard::Address QtVCardAddressField::getAddress() const
{
    VCard::Address address;
    address.isPreferred = getPreferred();
    address.isHome = getHome();
    address.isWork = getWork();
    address.deliveryType = domesticRadioButton->isChecked()
        ? VCard::DomesticDelivery
        : (internationalRadioButton->isChecked() ? VCard::InternationalDelivery
                                                 : VCard::None);
    address.isPostal = getTagComboBox()->isTagSet("postal");
    address.isParcel = getTagComboBox()->isTagSet("parcel");
    address.street = Q2PSTRING(streetLineEdit->text());
    address.poBox = Q2PSTRING(poboxLineEdit->text());
    address.addressExtension = Q2PSTRING(addressextLineEdit->text());
    address.locality = Q2PSTRING(cityLineEdit->text());
    address.postalCode = Q2PSTRING(pocodeLineEdit->text());
    address.region = Q2PSTRING(regionLineEdit->text());
    address.country = Q2PSTRING(countryLineEdit->text());
    return address;
}
void workerThread(State *s)
{
    taskID work;
    watchList *tasksToNotify, *next;
    bool canQueue;

    do {
        work = getWork(dispatch);

        /* Do stuff */
        atomicWrite(status[work] = INPROGRESS);
        doStuff(work);
        atomicWrite(status[work] = DONE); /* NOTE : Race condition */

        tasksToNotify = getWatches(work);
        while (tasksToNotify != NULL) {
            next = tasksToNotify->tail;
            canQueue = TRUE;
            foreach (dep in dep[tasksToNotify->id]) { /* OPT : Watch ordering */
                if (atomicRead(status[dep]) != DONE) {
                    /* NOTE : Race condition */
                    if (moveWatch(watch[dep], tasksToNotify)) {
                        canQueue = FALSE;
                        break;
                    } else {
                        /* Have hit the race condition, try the next option */
                        assert(atomicRead(status[dep]) == DONE);
                    }
                }
            }
            if (canQueue) { /* OPT : Save one work item */
                addWork(*dispatch, tasksToNotify->id);
                deleteWatch(tasksToNotify);
            }
            tasksToNotify = next;
        }
    } while (1); /* NOTE : some kind of control for thread exit needed */
}
void ReplicationExecutor::run()
{
    _networkInterface->startup();
    _dblockWorkers.startThreads();
    std::pair<WorkItem, CallbackHandle> work;
    while ((work = getWork()).first.callback) {
        {
            boost::lock_guard<boost::mutex> lk(_terribleExLockSyncMutex);
            const Status inStatus = work.first.isCanceled
                ? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
                : Status::OK();
            makeNoExcept(stdx::bind(work.first.callback,
                                    CallbackData(this, work.second, inStatus)))();
        }
        signalEvent(work.first.finishedEvent);
    }
    finishShutdown();
    _networkInterface->shutdown();
}
void RunPotentialParamsQueue::start()
{
    const fs::path workingDir = this->workingDir();
    while (getWork()) {
        while (!myParamsQueue.empty()) {
            spipe::SharedDataType & sweepSharedData = mySubpipeEngine->sharedData();
            const posix_time::ptime startTime =
                posix_time::microsec_clock::universal_time();

            myCurrentParams = myParamsQueue.front();

            // Store the potential parameters in global memory
            getEngine()->globalData().setParameters(mySettings.tag, myCurrentParams);

            const fs::path sweepPath = workingDir /
                common::generateParamDirName(myCurrentParams,
                                             getEngine()->globalData().getSeedName());
            // Set a directory for this set of parameters
            sweepSharedData.setWorkingDir(sweepPath);

            mySubpipeEngine->run();

            myDoneParams.push_back(myCurrentParams);
            myParamsQueue.pop();

            // Save how long it took to process that parameter set
            myWorkItemsTiming.insert(
                posix_time::microsec_clock::universal_time() - startTime);

            // Send the resultant structures down our pipe
            releaseBufferedStructures(sweepPath.string());
        }
        updateDoneParams();
        updateWorkChunkSize();
    }
}
int bitcoin_loop(const unsigned int processcount)
{
    printf("\n\nStarting bitcoin_loop\n");

    // Start, end time
    unsigned long start, end;
    start = current_time_millis();

    Blockheader *blockheader = malloc(sizeof(Blockheader));
    getWork(blockheader);

    // Split the calculation of the hashes into several segments based on the
    // processcount, then work through the segments sequentially.
    int segment_size = ceil((double)MAX_HASHES / processcount);
    for (int i = 0; i < processcount; i++) {
        calculate_hash(blockheader, segment_size);
    }

    end = current_time_millis();
    printf("Calculation finished after %.3fs\n", (double)(end - start) / 1000);
    free(blockheader);
    return EXIT_SUCCESS;
}
void SelectExternalDialog::saveSetting()
{
    m_liteApp->settings()->setValue("litedebug/external/cmd", getCmd());
    m_liteApp->settings()->setValue("litedebug/external/args", getArgs());
    m_liteApp->settings()->setValue("litedebug/external/work", getWork());
}
/* Hybrid MPI + OpenMP word-count map-reduce. Assumes the surrounding file
 * declares: fifoQ, work, NUM_READ_THREADS, the omp locks (worklock, inclock,
 * readlock, readerlock, mapperlock) and the helpers initQ, putWork, getWork,
 * is_empty, constr_work, mapper, reducer, hash, read_in, combine_queue,
 * calcnum and printQ_to_file. The reader/mapper/reducer section bodies,
 * previously duplicated per thread and per rank, are factored into helpers. */

/* Reader: tokenize every file assigned to this node into one map queue.
 * The FILE pointers are shared by all four readers, so each fscanf is
 * guarded by readerlock; the word buffer itself is thread-local. */
static void reader_section(FILE **node_files, int sendsize, fifoQ *map_queue,
                           int *read_finish)
{
    char str[50];
    int i;
    for (i = 0; i < sendsize; i++) {
        while (!feof(node_files[i])) {
            omp_set_lock(&readerlock);
            if (feof(node_files[i])) {
                omp_unset_lock(&readerlock);
                break;
            }
            strcpy(str, "");
            fscanf(node_files[i], "%s", str);
            omp_unset_lock(&readerlock);
            if (strcmp(str, ""))
                putWork(map_queue, constr_work(str));
        }
    }
    omp_set_lock(&inclock);
    (*read_finish)++;
    omp_unset_lock(&inclock);
}

/* Mapper: drain one map queue into a private queue, then partition the
 * mapped pairs across the shared reduce queues by key hash. */
static void mapper_section(fifoQ *map_queue, fifoQ **reduce_queues,
                           int *read_finish, int *mapping_done,
                           const char *qname)
{
    int i;
    fifoQ *innerQ = initQ(50000, qname);
    while (*read_finish < NUM_READ_THREADS || !is_empty(map_queue)) {
        printf(""); /* keeps the spin on the shared flag from being optimized away */
        if (!is_empty(map_queue)) {
            work w = getWork(map_queue);
            mapper(innerQ, w);
        }
    }
    for (i = 0; i <= innerQ->in; i++) {
        work w = getWork(innerQ);
        putWork(reduce_queues[hash(w.str)], w);
    }
    omp_set_lock(&inclock);
    (*mapping_done)++;
    omp_unset_lock(&inclock);
}

/* Reducer: wait until every mapper is done, then reduce one bucket. */
static void reducer_section(fifoQ **reduce_queues, fifoQ **reduced,
                            int idx, int *mapping_done)
{
    while (*mapping_done < NUM_READ_THREADS) {
        printf(""); /* same spin-barrier hack as in the mappers */
    }
    reduced[idx] = reducer(reduce_queues[idx]);
}

int main(int argc, char **argv)
{
    FILE *out = NULL;
    char **input_filenames = NULL;
    int input_len; /* number of input files */
    FILE **input_files = NULL;
    int i, j;
    double elapsed_time;
    int read_finish = 0;  /* readers that have finished */
    int mapping_done = 0; /* mappers that have finished */
    struct timeval tvalBefore, tvalAfter;

    int rank, size, len;
    char name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(name, &len);
    MPI_Status status;

    omp_init_lock(&worklock);
    omp_init_lock(&inclock);
    omp_init_lock(&readlock);
    omp_init_lock(&readerlock);
    omp_init_lock(&mapperlock);

    if (argc < 5) {
        printf("Usage ./mapreduce -in [input files].... -out [output file]\n");
        return 0;
    } else {
        if (strcmp("-in", argv[1])) {
            printf("Usage ./mapreduce -in [input files].... -out [output file]\n");
            return 0;
        }
        for (i = 2; i < argc; i++) { /* scan past the input file list */
            if (!strcmp("-out", argv[i]))
                break;
        }
        input_len = i - 2;
        input_filenames = (char **)malloc(sizeof(char *) * input_len);
        for (j = 0; j < input_len; j++)
            input_filenames[j] = (char *)malloc(sizeof(char) * 50);
        for (i = 2, j = 0; j < input_len; i++, j++)
            strcpy(input_filenames[j], argv[i]);
        input_files = read_in(input_filenames, input_len, 0);
        if (strcmp("-out", argv[2 + input_len])) {
            printf("output file missing, using default name 'out'\n");
            out = fopen("out", "w");
        } else {
            out = fopen(argv[3 + input_len], "w");
        }
    }

    omp_set_num_threads(8); /* readers and mappers run concurrently */

    fifoQ **queues_to_map = (fifoQ **)malloc(sizeof(fifoQ *) * 5);
    queues_to_map[0] = initQ(1000000, "queue_to_map0");
    queues_to_map[1] = initQ(1000000, "queue_to_map1");
    queues_to_map[2] = initQ(1000000, "queue_to_map2");
    queues_to_map[3] = initQ(1000000, "queue_to_map3");
    queues_to_map[4] = initQ(1000000, "queue_to_map4");
    fifoQ **queues_to_reduce = (fifoQ **)malloc(sizeof(fifoQ *) * 5);
    queues_to_reduce[0] = initQ(1000000, "queue_to_reduce0");
    queues_to_reduce[1] = initQ(1000000, "queue_to_reduce1");
    queues_to_reduce[2] = initQ(1000000, "queue_to_reduce2");
    queues_to_reduce[3] = initQ(1000000, "queue_to_reduce3");
    queues_to_reduce[4] = initQ(1000000, "queue_to_reduce4");
    fifoQ **queues_reduced = (fifoQ **)malloc(sizeof(fifoQ *) * 5);
    fifoQ *final_queue = initQ(1000000, "final Q");

    /* Number of files this node works on: input_len split round-robin. */
    int sendsize = input_len / size + (input_len % size - rank > 0 ? 1 : 0);
    FILE **node_files = (FILE **)malloc(sizeof(FILE *) * sendsize);

    if (rank == 0) {
        /* Distribute the file names round-robin across the nodes. */
        char ***files_tosend = (char ***)malloc(sizeof(char **) * size);
        int lsendsize;
        for (i = 0; i < size; i++) {
            lsendsize = input_len / size + (input_len % size - i > 0 ? 1 : 0);
            printf("send size of core %d is %d\n", i, lsendsize);
            files_tosend[i] = (char **)malloc(sizeof(char *) * lsendsize);
            for (j = 0; j < lsendsize; j++)
                files_tosend[i][j] = (char *)malloc(sizeof(char) * 50);
        }
        for (i = 0; i < input_len; i++) {
            int belongs_to = i % size;
            int pos = i / size;
            strcpy(files_tosend[belongs_to][pos], input_filenames[i]);
        }
        for (i = 1; i < size; i++) {
            lsendsize = input_len / size + (input_len % size - i > 0 ? 1 : 0);
            for (j = 0; j < lsendsize; j++) {
                printf("sending %s to cpu %d\n", files_tosend[i][j], i);
                MPI_Send(files_tosend[i][j], 50, MPI_BYTE, i, 1, MPI_COMM_WORLD);
            }
        }
        for (i = 0; i < sendsize; i++)
            node_files[i] = fopen(files_tosend[0][i], "r");
    } else {
        /* Receive this node's file names from rank 0. */
        for (i = 0; i < sendsize; i++) {
            char bufstr[50];
            MPI_Recv(bufstr, 50, MPI_BYTE, 0, 1, MPI_COMM_WORLD, &status);
            node_files[i] = fopen(bufstr, "r");
        }
    }

    gettimeofday(&tvalBefore, NULL);

    #pragma omp parallel sections
    {
        /* Four reader threads, one per map queue. */
        #pragma omp section
        reader_section(node_files, sendsize, queues_to_map[0], &read_finish);
        #pragma omp section
        reader_section(node_files, sendsize, queues_to_map[1], &read_finish);
        #pragma omp section
        reader_section(node_files, sendsize, queues_to_map[2], &read_finish);
        #pragma omp section
        reader_section(node_files, sendsize, queues_to_map[3], &read_finish);
        /* Four mapper threads, one per map queue. */
        #pragma omp section
        mapper_section(queues_to_map[0], queues_to_reduce, &read_finish, &mapping_done, "innerQ 0");
        #pragma omp section
        mapper_section(queues_to_map[1], queues_to_reduce, &read_finish, &mapping_done, "innerQ 1");
        #pragma omp section
        mapper_section(queues_to_map[2], queues_to_reduce, &read_finish, &mapping_done, "innerQ 2");
        #pragma omp section
        mapper_section(queues_to_map[3], queues_to_reduce, &read_finish, &mapping_done, "innerQ 3");
        /* Four reducer threads, one per reduce bucket. */
        #pragma omp section
        reducer_section(queues_to_reduce, queues_reduced, 0, &mapping_done);
        #pragma omp section
        reducer_section(queues_to_reduce, queues_reduced, 1, &mapping_done);
        #pragma omp section
        reducer_section(queues_to_reduce, queues_reduced, 2, &mapping_done);
        #pragma omp section
        reducer_section(queues_to_reduce, queues_reduced, 3, &mapping_done);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    gettimeofday(&tvalAfter, NULL);
    elapsed_time = (double)(tvalAfter.tv_sec - tvalBefore.tv_sec)
                 + (double)(tvalAfter.tv_usec - tvalBefore.tv_usec) / 1000000;
    if (rank == 0)
        printf("elapsed time = %.2f sec, rank %d\n", elapsed_time, rank);

    if (rank == 0) {
        /* Final reduction: merge the local buckets, then pull every
         * (str, count) pair from the other nodes and reduce once more. */
        for (i = 0; i < NUM_READ_THREADS; i++)
            combine_queue(final_queue, queues_reduced[i]);
        for (i = 1; i < size; i++) {
            int revbuf;
            MPI_Recv(&revbuf, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status);
            for (j = 0; j < revbuf; j++) {
                char strbuf[50];
                int ctbuf; /* count arrives as one MPI_INT, so receive it into an int */
                MPI_Recv(strbuf, 50, MPI_BYTE, i, 1, MPI_COMM_WORLD, &status);
                MPI_Recv(&ctbuf, 1, MPI_INT, i, 1, MPI_COMM_WORLD, &status);
                work w;
                strcpy(w.str, strbuf);
                w.count = ctbuf;
                putWork(final_queue, w);
            }
        }
        fifoQ *output = reducer(final_queue);
        printQ_to_file(&output, 1, out);
    } else {
        /* Send every locally reduced (str, count) pair to rank 0. */
        int total_num = calcnum(queues_reduced, NUM_READ_THREADS);
        MPI_Send(&total_num, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        for (i = 0; i < NUM_READ_THREADS; i++)
            combine_queue(final_queue, queues_reduced[i]);
        for (i = 0; i < total_num; i++) {
            MPI_Send(&final_queue->works[i].str, 50, MPI_BYTE, 0, 1, MPI_COMM_WORLD);
            MPI_Send(&final_queue->works[i].count, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }
    }
    for (i = 0; i < input_len; i++)
        fclose(input_files[i]);
    fclose(out);

    omp_destroy_lock(&inclock);
    omp_destroy_lock(&worklock);
    omp_destroy_lock(&readlock);
    omp_destroy_lock(&readerlock);
    omp_destroy_lock(&mapperlock);
    MPI_Finalize();
    return 0;
}
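/* A typical build-and-run line for the hybrid MPI + OpenMP program above.
 * The binary name and argument shape come from its own usage string; the
 * compiler flags are the standard OpenMPI/GCC ones and may differ per
 * toolchain:
 *
 *     mpicc -fopenmp -o mapreduce mapreduce.c
 *     mpirun -np 2 ./mapreduce -in file1 file2 -out result
 */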
int bitcoin_loop(const unsigned int processcount)
{
    printf("\n\nStarting bitcoin_loop\n");

    // Start, end time
    unsigned long start, end;
    start = current_time_millis();

    // Create a Blockheader object and fill it with the initial data using the getWork method
    Blockheader *b_header = malloc(sizeof(Blockheader));
    getWork(b_header);

    // Split the calculation of the hashes into several segments based on the processcount
    int segment_size = (int)floor((double)MAX_HASHES / processcount);

    // Spawn a process for each segment
    for (unsigned int i = 0; i < processcount; i++) {
        pid_t pid = fork();
        if (pid < 0) {
            perror("fork");
            return EXIT_FAILURE;
        }
        if (pid == 0) {
            // Child: work through one segment, then exit. (To keep the
            // children from hashing the same range, the starting nonce would
            // have to be offset by i * segment_size, as bitcoin_parallel
            // above does.)
            calc_hashes(segment_size);
            exit(EXIT_SUCCESS);
        }
    }

    // Parent: wait until all children finish before exiting
    for (unsigned int i = 0; i < processcount; i++)
        wait(NULL);

    end = current_time_millis();
    printf("Calculation finished after %.3fs\n", (double)(end - start) / 1000);
    free(b_header);
    return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
    FILE *out = NULL;
    char str[50];
    int read_finish = 0; /* set once the reader thread is done */
    char **input_filenames = NULL;
    int input_len; /* number of input files */
    FILE **input_files = NULL;
    int i, j;

    /* qlock guards the shared work queue between the reader and the mapper;
     * plain flag variables spun on in a loop do not give mutual exclusion. */
    omp_lock_t qlock;
    omp_init_lock(&qlock);

    if (argc < 5) {
        printf("Usage ./mapreduce -in [input files].... -out [output file]\n");
        return 0;
    } else {
        if (strcmp("-in", argv[1])) {
            printf("Usage ./mapreduce -in [input files].... -out [output file]\n");
            return 0;
        }
        for (i = 2; i < argc; i++) { /* scan past the input file list */
            if (!strcmp("-out", argv[i]))
                break;
        }
        input_len = i - 2;
        input_filenames = (char **)malloc(sizeof(char *) * input_len);
        for (j = 0; j < input_len; j++)
            input_filenames[j] = (char *)malloc(sizeof(char) * 50);
        for (i = 2, j = 0; j < input_len; i++, j++)
            strcpy(input_filenames[j], argv[i]);
        input_files = read_in(input_filenames, input_len, 0);
        if (strcmp("-out", argv[2 + input_len])) {
            printf("output file missing, using default name 'out'\n");
            out = fopen("out", "w");
        } else {
            out = fopen(argv[3 + input_len], "w");
        }
    }

    omp_set_num_threads(2);
    workQ *queue_to_map = initQ(100000);
    workQ *queue_to_reduce = initQ(100000);

    #pragma omp parallel sections
    {
        #pragma omp section /* reader thread */
        {
            int i;
            for (i = 0; i < input_len; i++) {
                while (!feof(input_files[i])) {
                    fscanf(input_files[i], "%s", str);
                    omp_set_lock(&qlock);
                    putWork(queue_to_map, constr_work(str));
                    omp_unset_lock(&qlock);
                }
                fclose(input_files[i]);
            }
            read_finish = 1;
            printf("reader thread done\n");
        }
        #pragma omp section /* mapper thread */
        {
            while (!read_finish || !is_empty(queue_to_map)) {
                if (!is_empty(queue_to_map)) {
                    omp_set_lock(&qlock);
                    work w = getWork(queue_to_map);
                    omp_unset_lock(&qlock);
                    mapper(queue_to_reduce, w);
                }
            }
            printf("mapper thread done\n");
        }
    }

    printQ(queue_to_reduce);
    omp_destroy_lock(&qlock);
    fclose(out);
    return 0;
}
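/* Both map-reduce variants above treat fifoQ (workQ in the two-thread
 * version) as a fixed-capacity queue of (str, count) work items accessed via
 * putWork/getWork/is_empty. A minimal sketch matching those call sites; the
 * fields `works` and `in` appear in the originals, while `out`, `cap` and
 * the exact layout are assumptions: */
typedef struct work {
    char str[50];
    int count;
} work;

typedef struct fifoQ {
    work *works;  /* backing array of queued items */
    int in;       /* index of the last item written */
    int out;      /* index of the next item to read (assumed) */
    int cap;      /* capacity passed to initQ (assumed) */
    char name[50];
} fifoQ;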