/**
 * Destroy BAM framework data structure.
 *
 * Releases every region stored in the framework's region list, the list
 * itself, and the three OpenMP locks owned by the framework.  The fwork
 * struct itself is NOT freed; the caller retains ownership of it.
 */
void bfwork_destroy(bam_fwork_t *fwork)
{
	size_t i;	/* was int: size_t matches linked_list_size() and avoids a signed/unsigned compare */
	bam_region_t *region;
	linked_list_t *list;
	size_t list_l;

	assert(fwork);
	assert(fwork->regions_list);

	//Handle to list
	list = fwork->regions_list;

	//Regions exists? (kept as a runtime guard: assert() is a no-op under NDEBUG)
	if(fwork->regions_list)
	{
		list_l = linked_list_size(list);
		for(i = 0; i < list_l; i++)
		{
			//Get region, release its contents, then the region itself
			region = linked_list_get(i, list);
			breg_destroy(region, 1);
			free(region);
		}
		linked_list_free(list, NULL);
	}

	//Destroy locks
	omp_destroy_lock(&fwork->regions_lock);
	omp_destroy_lock(&fwork->output_file_lock);
	omp_destroy_lock(&fwork->reference_lock);
}
/*
 * MEX gateway for the code-generated 'rffe_test' entry point.
 * Sets up the emlrt runtime (global locks, OpenMP runtime library, module
 * initialization), then dispatches to the generated function under a
 * setjmp guard so that runtime errors in generated code are reported to
 * MATLAB instead of aborting the process.
 */
void mexFunction(int32_T nlhs, mxArray *plhs[], int32_T nrhs, const mxArray *prhs[])
{
  emlrtStack st = { NULL, NULL, NULL };

  /* Ensure cleanup runs when the MEX file is unloaded. */
  mexAtExit(rffe_test_atexit);

  /* Initialize the memory manager. */
  omp_init_lock(&emlrtLockGlobal);
  omp_init_nest_lock(&emlrtNestLockGlobal);
  emlrtLoadLibrary("/usr/local/MATLAB/R2015b/sys/os/glnxa64/libiomp5.so");

  /* Module initialization. */
  rffe_test_initialize();
  st.tls = emlrtRootTLSGlobal;
  emlrtSetJmpBuf(&st, &emlrtJBEnviron);
  if (setjmp(emlrtJBEnviron) == 0) {
    /* Dispatch the entry-point. */
    rffe_test_mexFunction(nlhs, plhs, nrhs, prhs);
    omp_destroy_lock(&emlrtLockGlobal);
    omp_destroy_nest_lock(&emlrtNestLockGlobal);
  } else {
    /* A runtime error longjmp'd back here: still release the locks,
       then report the error to MATLAB. */
    omp_destroy_lock(&emlrtLockGlobal);
    omp_destroy_nest_lock(&emlrtNestLockGlobal);
    emlrtReportParallelRunTimeError(&st);
  }
}
// This function is run after kernel are finished. void destroy_locks(int nclusters) { omp_destroy_lock(&delta_lock); for(int n = 0; n < nclusters; n++) { omp_destroy_lock(&new_centers_locks[n][0]); omp_destroy_lock(&new_centers_locks[n][1]); omp_destroy_lock(&new_centers_len_locks[n]); } }
/* Instrumented wrapper for omp_destroy_lock().
 * The lock is always destroyed in both the OpenMP runtime and EPIK's
 * bookkeeping; when tracing is active the destruction is additionally
 * bracketed by enter/exit region events. */
void POMP2_Destroy_lock(omp_lock_t *s)
{
    /* Evaluate the tracing flag once, as the original did. */
    const int traced = pomp2_tracing;

    if (traced)
        esd_enter(epk_omp_regid[EPK__OMP_DESTROY_LOCK]);

    omp_destroy_lock(s);
    epk_lock_destroy(s);

    if (traced)
        esd_exit(epk_omp_regid[EPK__OMP_DESTROY_LOCK]);
}
/* Instrumented wrapper for omp_destroy_lock().
 * When tracing is on, the call is bracketed by timestamped VampirTrace
 * enter/exit events; otherwise the lock is simply destroyed. */
void POMP_Destroy_lock(omp_lock_t *s)
{
    /* Check the trace flag once, matching the original single evaluation. */
    const int tracing = IS_POMP_TRACE_ON;
    uint64_t ts;

    if (tracing) {
        ts = vt_pform_wtime();
        vt_enter(VT_CURRENT_THREAD, &ts, vt_omp_regid[VT__OMP_DESTROY_LOCK]);
    }

    omp_destroy_lock(s);

    if (tracing) {
        ts = vt_pform_wtime();
        vt_exit(VT_CURRENT_THREAD, &ts);
    }
}
/*
 * Fortran-callable wrapper for POMP lock destruction.
 * When tracing is on, omp_destroy_lock() is bracketed by timestamped
 * VampirTrace enter/exit events; otherwise the lock is destroyed directly.
 */
VT_DECLDEF(void POMP_Destroy_lock_f(omp_lock_t *s))
{
  if ( IS_POMP_TRACE_ON )
  {
    uint64_t time;
    time = vt_pform_wtime();
    vt_enter(VT_CURRENT_THREAD, &time, vt_omp_regid[VT__OMP_DESTROY_LOCK]);
    omp_destroy_lock(s);
    time = vt_pform_wtime();
    vt_exit(VT_CURRENT_THREAD, &time);
  }
  else
  {
    omp_destroy_lock(s);
  }
}
/* Fortran 77 binding aliases for the wrapper above
   (macro invocation continues past this excerpt). */
VT_GENERATE_F77_BINDINGS(pomp_destroy_lock, POMP_DESTROY_LOCK,
/*
 * Fortran-callable wrapper for POMP lock destruction.
 * Destroys the lock in both the OpenMP runtime and VampirTrace's own
 * bookkeeping (vt_lock_destroy); with tracing on, the work is bracketed
 * by timestamped enter/exit events.
 */
DEF_FPOMP_FUNC(void POMP_Destroy_lock_f(omp_lock_t *s))
{
  if ( IS_POMP_TRACE_ON )
  {
    uint64_t time = vt_pform_wtime();
    vt_enter(&time, vt_omp_regid[VT__OMP_DESTROY_LOCK]);
    omp_destroy_lock(s);
    vt_lock_destroy(s);
    time = vt_pform_wtime();
    vt_exit(&time);
  }
  else
  {
    omp_destroy_lock(s);
    vt_lock_destroy(s);
  }
}
/* Fortran 77 binding aliases for the wrapper above
   (macro invocation continues past this excerpt). */
VT_GENERATE_F77_BINDINGS(pomp_destroy_lock, POMP_DESTROY_LOCK,
int main( int argc, char **argv ) { struct node *dummy_node; dummy_node = malloc(sizeof(*dummy_node)); dummy_node->data = DUMMY; dummy_node->next = NULL; struct queue q = { .head = dummy_node, .tail = dummy_node }; omp_init_lock(&q.tail_lock); omp_init_lock(&q.head_lock); #pragma omp parallel #pragma omp single nowait { int i; for(i=1;i<5;++i) { #pragma omp task { int j; for(j = 0; j < 1000; ++j) { enqueue(&q, i*1000+j); printf("enqueue %d\n", i*1000+j); } } #pragma omp task { int d, j; for(j = 0; j < 1000; ++j) { d = dequeue(&q); if (d) printf("dequeue %d\n", d); } } } } int d; while(1) { d = dequeue(&q); if (!d) break; printf("dequeue %d\n", d); } assert(q.head->data == DUMMY && q.tail->data == DUMMY); //assert(!q.head && !q.tail); omp_destroy_lock(&q.tail_lock); omp_destroy_lock(&q.head_lock); return 0; }
/**
 * Compute infill for every layer, in parallel when OpenMP is available.
 * Progress reporting is serialized with an OpenMP lock; a false return
 * from the progress callback cancels the remaining work.
 */
void Model::CalcInfill()
{
  // Nothing to do if infill is disabled and there are no solid layers.
  if (!settings.get_boolean("Slicing","DoInfill") &&
      settings.get_double("Slicing","SolidThickness") == 0.0)
    return;
  int count = (int)layers.size();
  m_progress->start (_("Infill"), count);
  // Update the progress display roughly every 1% of the layers.
  int progress_steps=max(1,(count/100));
  bool cont = true;
  //cerr << "make infill"<< endl;
#ifdef _OPENMP
  omp_lock_t progress_lock;
  omp_init_lock(&progress_lock);
#pragma omp parallel for schedule(dynamic)
#endif
  for (int i=0; i < count ; i++)
    {
      //cerr << "thread " << omp_get_thread_num() << endl;
      if (i%progress_steps==0){
#ifdef _OPENMP
        omp_set_lock(&progress_lock);
#endif
        // update() returns false when the user cancelled.
        cont = (m_progress->update(i));
#ifdef _OPENMP
        omp_unset_lock(&progress_lock);
#endif
      }
      // Cancelled: cannot break out of an OpenMP parallel for, so skip
      // the remaining iterations instead.
      if (!cont) continue;
      layers[i]->CalcInfill(settings);
    }
#ifdef _OPENMP
  omp_destroy_lock(&progress_lock);
#endif
  //m_progress->stop (_("Done"));
}
/**
 * Generate shell (perimeter) polygons for every layer, in parallel when
 * OpenMP is available.  Progress updates are serialized with a lock; a
 * false return from the progress callback cancels the remaining work.
 */
void Model::MakeShells()
{
  int count = (int)layers.size();
  if (count == 0) return;
  if (!m_progress->restart (_("Shells"), count)) return;
  // Update the progress display roughly every 1% of the layers.
  int progress_steps=max(1,(int)(count/100));
  bool cont = true;
#ifdef _OPENMP
  omp_lock_t progress_lock;
  omp_init_lock(&progress_lock);
#pragma omp parallel for schedule(dynamic)
#endif
  for (int i=0; i < count; i++)
    {
      if (i%progress_steps==0) {
#ifdef _OPENMP
        omp_set_lock(&progress_lock);
#endif
        // update() returns false when the user cancelled.
        cont = (m_progress->update(i));
#ifdef _OPENMP
        omp_unset_lock(&progress_lock);
#endif
      }
      // Cancelled: cannot break out of an OpenMP parallel for, so skip
      // the remaining iterations instead.
      if (!cont) continue;
      layers[i]->MakeShells(settings);
    }
#ifdef _OPENMP
  omp_destroy_lock(&progress_lock);
#endif
  m_progress->update(count);
  //m_progress->stop (_("Done"));
}
int main(int argc, char **argv){ srand(time(NULL)); start(); //disconnect(); glutInit(&argc, argv); glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB); glutInitWindowPosition(1, 1); glutInitWindowSize(700, 700); glutCreateWindow("TREE SEARCH"); init(); glutDisplayFunc(display); glutIdleFunc(idle); glutReshapeFunc(reshape); omp_init_lock(&best_tour_lock); printf("Procesando...\n"); double time0 = omp_get_wtime(); partition(stacks,t1); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < thread_count; i++){ TSP((void *) stacks[i]); } omp_destroy_lock(&best_tour_lock); double time1 = omp_get_wtime(); printf("Costo: %f\n",best_tour->cost); /**Impresion del tiempo que tardo el algoritmo para determinar las * posiciones y velocidades de las particulas ***/ printf("duracion: %f\n",time1-time0); glutMainLoop(); return 0; }
/*
 * Destructor: destroys the handler's OpenMP lock, prints a banner
 * summarizing the active-testing results to stdout, writes the cycle
 * counts to the report file and closes it.
 */
AspEventHandler::~AspEventHandler() {
  _LOG_ << _HANDLER_TITLE_ << "Finalizing AspEventHandler";
  _LOG_ << _HANDLER_TITLE_ << "Destroying the lock";
  omp_destroy_lock(&lock_);
  if(num_unique_cycles_ > 0) {
    printf("\n"
           "****************************************************\n"
           "****************************************************\n"
           "***** Active testing found:\n"
           "***** %d data races and serializability violations in %s/%s!\n"
           "***** (See the report at work/%s/report.txt for details.)\n"
           "****************************************************\n"
           "****************************************************\n",
           num_unique_cycles_, test_name_, variant_name_, test_name_);
  } else {
    printf("\n"
           "****************************************************\n"
           "****************************************************\n"
           "***** Active testing found:\n"
           "***** NO data races or serializability violations.\n"
           "****************************************************\n"
           "****************************************************\n");
  }
  // NOTE(review): report_file_ is used without a NULL check — assumes it
  // was opened successfully during construction; confirm.
  fprintf(report_file_,
          "Number of all cycles: %d\nNumber of unique cycles: %d\n",
          num_cycles_, num_unique_cycles_);
  fclose(report_file_);
}
// Two-phase bubble popping on the succinct de Bruijn graph.
// Phase 1 scans all nodes in parallel and collects branch groups that form
// bubbles (appends guarded by a lock); phase 2 re-searches each candidate
// serially and removes its low-depth branches.  Returns the number of
// bubbles actually popped.
int64_t PopBubbles(SuccinctDBG &dbg, int max_bubble_len, double low_depth_ratio) {
    omp_lock_t candidates_lock;
    omp_init_lock(&candidates_lock);

    const int kMaxBranchesPerGroup = 4;
    if (max_bubble_len <= 0) {
        // Default search depth: a bit over two k-mers.
        max_bubble_len = dbg.kmer_k * 2 + 2;
    }

    vector<std::pair<int, int64_t> > bubble_candidates;
    int64_t num_bubbles = 0;

#pragma omp parallel for
    for (int64_t node = 0; node < dbg.size; ++node) {
        // Only last-edges of valid nodes with more than one outgoing edge
        // can start a bubble.
        bool starts_bubble = dbg.IsValidNode(node) && dbg.IsLast(node) && dbg.Outdegree(node) > 1;
        if (!starts_bubble) {
            continue;
        }

        BranchGroup group(&dbg, node, kMaxBranchesPerGroup, max_bubble_len);
        if (group.Search()) {
            omp_set_lock(&candidates_lock);
            bubble_candidates.push_back(std::make_pair(group.length(), node));
            omp_unset_lock(&candidates_lock);
        }
    }

    for (unsigned idx = 0; idx < bubble_candidates.size(); ++idx) {
        BranchGroup group(&dbg, bubble_candidates[idx].second, kMaxBranchesPerGroup, max_bubble_len);
        if (group.Search() && group.RemoveErrorBranches(low_depth_ratio)) {
            ++num_bubbles;
        }
    }

    omp_destroy_lock(&candidates_lock);
    return num_bubbles;
}
/*
 * Mutual-exclusion test for omp_test_lock().
 * Every iteration of the worksharing loop spins on omp_test_lock() until
 * it acquires the global lock `lck`, then increments/decrements a counter
 * that must only ever be touched by one thread at a time.  If exclusion
 * ever fails, `result` accumulates a non-zero value.
 * Returns 1 on success (result == 0 and all LOOPCOUNT iterations ran).
 */
int test_omp_test_lock()
{
  int nr_threads_in_single = 0;
  int result = 0;
  int nr_iterations = 0;
  int i;
  omp_init_lock (&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++)
      {
        /* Spin until this thread acquires the lock. */
        while (!omp_test_lock (&lck))
          {};
#pragma omp flush
        nr_threads_in_single++;
#pragma omp flush
        nr_iterations++;
        nr_threads_in_single--;
        /* Inside the lock this must always read 0. */
        result = result + nr_threads_in_single;
        omp_unset_lock (&lck);
      }
  }
  omp_destroy_lock(&lck);
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
/*
 * Resolve all collisions recorded by the sweep processors.
 * Each processor's collision list is handled by one iteration of the
 * parallel loop (schedule(static,1)).  For every collision both particles
 * are rewound to the moment of impact, the collision is resolved, and the
 * particles are advanced again.  Boundary-crossing collisions are
 * serialized through a single lock when OpenMP is enabled.
 */
void collisions_resolve(void){
#ifdef OPENMP
	omp_lock_t boundarylock;
	omp_init_lock(&boundarylock);
#endif //OPENMP
#pragma omp parallel for schedule (static,1)
	for (int proci=0; proci<sweeps_proc; proci++){
		struct collision* c = clist[proci].collisions;
		int colN = clist[proci].N;
		// NOTE(review): unsynchronized update of a shared counter from a
		// parallel loop — racy; confirm whether an exact total matters.
		total_collisions = total_collisions + colN;
		if (output_check(.1 * 2. * M_PI / 1.)) {
			printf("%f", total_collisions);
			// total_collisions = 0;
		}
		// Randomize array (shuffle resolution order within this list).
		for(int i=0; i<colN; i++){
			int j = rand()%colN;
			struct collision ctemp = c[i];
			c[i]=c[j];
			c[j]=ctemp;
		}
		for(int i=0; i<colN; i++){
			struct collision c1= c[i];
			c1.gb = boundaries_get_ghostbox(0,0,0);
			// Rewind both particles to the collision time.
			particles[c1.p1].x -= c1.time*particles[c1.p1].vx;
			particles[c1.p1].y -= c1.time*particles[c1.p1].vy;
			particles[c1.p1].z -= c1.time*particles[c1.p1].vz;
			particles[c1.p2].x -= c1.time*particles[c1.p2].vx;
			particles[c1.p2].y -= c1.time*particles[c1.p2].vy;
			particles[c1.p2].z -= c1.time*particles[c1.p2].vz;
#ifdef OPENMP
			// Boundary-crossing collisions touch shared state: serialize.
			if (c1.crossing){
				omp_set_lock(&boundarylock);
			}
#endif //OPENMP
			collision_resolve(c1);
#ifdef OPENMP
			if (c1.crossing){
				omp_unset_lock(&boundarylock);
			}
#endif //OPENMP
			// Advance both particles back to the end of the step.
			particles[c1.p1].x += c1.time*particles[c1.p1].vx;
			particles[c1.p1].y += c1.time*particles[c1.p1].vy;
			particles[c1.p1].z += c1.time*particles[c1.p1].vz;
			particles[c1.p2].x += c1.time*particles[c1.p2].vx;
			particles[c1.p2].y += c1.time*particles[c1.p2].vy;
			particles[c1.p2].z += c1.time*particles[c1.p2].vz;
		}
		// Reset this processor's lists for the next step.
		clist[proci].N = 0;
		sweepphi[proci].N = 0;
	}
#ifdef OPENMP
	omp_destroy_lock(&boundarylock);
#endif //OPENMP
}
/* Destructor: return the queue's backing storage to the allocator and,
 * when OpenMP support is compiled in, tear down the queue's lock.
 * NOTE(review): elements are deallocated but not explicitly destroyed
 * here — presumably trivially destructible or destroyed elsewhere;
 * confirm. */
~PriorityQueue()
{
    if ( size_ > 0 )
        alloc_.deallocate(dataQue_, size_);
#ifdef USE_OPENMP_NA
    omp_destroy_lock(&lock_);
#endif
}
/*
 * Demonstrates the OpenMP simple-lock API: each thread first prints its
 * id under the lock, then busy-waits with omp_test_lock() (doing other
 * work via skip()) until it owns the lock, does work(), and releases it.
 */
int main()
{
    omp_lock_t lock;
    int tid;

    omp_init_lock(&lock);

#pragma omp parallel shared(lock) private(tid)
    {
        tid = omp_get_thread_num();

        /* Serialize the printf so outputs do not interleave. */
        omp_set_lock(&lock);
        printf("My thread id is %d.\n", tid);
        omp_unset_lock(&lock);

        /* Poll for the lock; do useful filler work while waiting. */
        for (;;) {
            if (omp_test_lock(&lock))
                break;
            skip(tid);
        }
        work(tid);      /* we now hold the lock */
        omp_unset_lock(&lock);
    }

    omp_destroy_lock(&lock);
    return 0;
}
/**
 * Build a per-column histogram of `data`.
 *
 * @param data      input samples, one column per dimension
 * @param histogram output: bins x num_dim counts (zero-initialized here)
 * @param bins      number of bins per dimension
 * @param min,max   value range mapped onto [0, bins)
 *
 * Bug fixed: the upper clamp used `pos > bins`, so a sample exactly at
 * `max` produced pos == bins and wrote one row past the end of the
 * histogram; the clamp is now `pos >= bins`.
 */
void computeHistogram (const Eigen::MatrixXf &data, Eigen::MatrixXf &histogram, size_t bins, float min, float max)
{
  float bin_size = (max-min) / bins;
  int num_dim = data.cols();
  histogram = Eigen::MatrixXf::Zero (bins, num_dim);

  for (int dim = 0; dim < num_dim; dim++)
  {
    // One lock per bin so concurrent increments to different bins do not
    // serialize on a single lock.
    omp_lock_t bin_lock[bins];
    for(size_t pos=0; pos<bins; pos++)
      omp_init_lock(&bin_lock[pos]);

#pragma omp parallel for firstprivate(min, max, bins) schedule(dynamic)
    for (int j = 0; j<data.rows(); j++)
    {
      int pos = std::floor( (data(j,dim) - min) / bin_size);
      // Clamp out-of-range samples into the first/last bin.
      if(pos < 0)
        pos = 0;
      if(pos >= (int)bins)   // was `>`: pos == bins indexed out of bounds
        pos = bins - 1;

      omp_set_lock(&bin_lock[pos]);
      histogram(pos,dim)++;
      omp_unset_lock(&bin_lock[pos]);
    }

    for(size_t pos=0; pos<bins; pos++)
      omp_destroy_lock(&bin_lock[pos]);
  }
}
/*
 * Clears the resources used: frees the board's height/width vectors and
 * matrix rows, destroys every cell lock, releases the lock and matrix
 * arrays, and finally frees the board itself.  The work is distributed
 * over the OpenMP team with sections/for constructs.
 */
void cleanAll(board_t* board){
	size_t n;
	#pragma omp parallel
	{
		#pragma omp sections
		{
			#pragma omp section
			free(board->vectorHeight);
			#pragma omp section
			free(board->vectorWidth);
		}
		#pragma omp for private (n)
		for(n = 0; n < board->height; n++){
			free(board->matrix[n]);
		}
		/* One lock per cell of the (height+1) x (width+1) grid. */
		#pragma omp for private (n)
		for(n = 0; n < ((board->height +1) * (board->width +1)); n++){
			omp_destroy_lock(&(board->locks[n]));
		}
		#pragma omp sections
		{
			#pragma omp section
			free(board->locks);
			#pragma omp section
			free(board->matrix);
		}
	}
	/* Freed only after the whole team has finished using it. */
	free(board);
}
/*
 * Drop all cached infill patterns and reset the lock guarding the cache.
 */
void Infill::clearPatterns() {
  // Invalidate each cached entry and release its polygons before the
  // vector itself is cleared.
  for (uint i=0; i<savedPatterns.size(); i++) {
    savedPatterns[i].type = INVALIDINFILL;
    savedPatterns[i].cpolys.clear();
  }
  savedPatterns.clear();
  //cerr << "clearpatterns " << savedPatterns.size() << endl;
  // Recycle the lock: destroy-then-init leaves it ready for reuse.
  omp_destroy_lock(&save_lock);
  omp_init_lock(&save_lock);
}
/*
 * Regression test for omp_unset_lock: runs three full
 * init/set/unset/destroy cycles on the same lock variable, then (where
 * supported) sets/unsets the same initialized lock twice in a row.
 * Prints SUCCESS/FAILED and returns 0/1 accordingly.
 * Fix: `main ()` relied on implicit int, which is invalid in C99/C++.
 */
int main (void)
{
  omp_lock_t lck;
  int errors = 0;

  /* A lock may be re-initialized after being destroyed: three cycles. */
  omp_init_lock (&lck);
  omp_set_lock (&lck);
  omp_unset_lock (&lck);
  omp_destroy_lock (&lck);

  omp_init_lock (&lck);
  omp_set_lock (&lck);
  omp_unset_lock (&lck);
  omp_destroy_lock (&lck);

  omp_init_lock (&lck);
  omp_set_lock (&lck);
  omp_unset_lock (&lck);
  omp_destroy_lock (&lck);

#ifdef __OMNI_SCASH__
  printf ("skip some tests. because, Omni on SCASH do not support destroy lock variable that is locked anyone.\n");
#else
  /* Set/unset the same initialized lock twice without destroying it. */
  omp_init_lock (&lck);
  omp_set_lock (&lck);
  omp_unset_lock (&lck);
  omp_set_lock (&lck);
  omp_unset_lock (&lck);
#endif

  if (errors == 0) {
    printf ("omp_unset_lock 001 : SUCCESS\n");
    return 0;
  } else {
    printf ("omp_unset_lock 001 : FAILED\n");
    return 1;
  }
}
int main() { omp_init_lock(&total_market_write_lock); omp_init_lock(&jobs_lock); srand(0); int tickNum = 0; nodes.resize(100); for(int i = 0; i < 10000; i++) { Job* job = new Job(); //job->assumed_correctness_sum = getRand(0.0f, 0.1f); jobs.insert(job); } std::cout << "Init done." << std::endl; FILE* pipe = _popen("\"C:/Program Files (x86)/gnuplot/bin/pgnuplot.exe\" -persist", "w"); //fprintf(pipe, "set terminal png crop size 640,480\n"); fprintf(pipe, "set terminal wx\n"); fprintf(pipe, "set output \"%s\"\n", "output/output.png"); fprintf(pipe, "plot '-' using 1:2 with lines \n"); while(tickNum < 1000) { #pragma omp parallel for for(int i = 0; i < nodes.size(); i++) { handleNode(&nodes[i]); } if((tickNum % 10) == 0) { fprintf(pipe, "%d %g\n", tickNum, nodes[0].getCorrectnessScaled()); printf("%d\n", tickNum); } ++tickNum; } fprintf(pipe, "e\n"); fflush(pipe); _pclose(pipe); omp_destroy_lock(&total_market_write_lock); omp_destroy_lock(&jobs_lock); }
/**
 * Destroy a read-write lock.
 * The backing implementation is selected at compile time: a POSIX rwlock
 * when ENABLE_PRWLOCK is defined, otherwise a plain OpenMP lock when
 * HAVE_OPENMP is defined, and a no-op when neither is configured.
 * @param rw pointer to lock object
 */
void rwlock_destroy(rwlock_t *rw)
{
#ifdef ENABLE_PRWLOCK
    pthread_rwlock_destroy(&rw->lock);
#else
#ifdef HAVE_OPENMP
    omp_destroy_lock(&rw->lock);
#endif
#endif
}
void destroyStack(struct Stack *s) { struct Node *temp; while (s->top != NULL) { temp = s->top; s->top = s->top->next; free(temp); } omp_destroy_lock(&s->lock); free(s); }
/*
 * Destructor: the vertex owns its edges, so delete each one (nulling the
 * slot) before tearing down the vertex's OpenMP lock.
 */
Vertex::~Vertex()
{
    EdgePtrVecIter iter = m_edges.begin();
    for(; iter != m_edges.end(); ++iter)
    {
        delete *iter;
        *iter = NULL;
    }

    //Added by YTH
    omp_destroy_lock(&m_lock);
}
/**
 * Destroy BAM context data structure.
 * Frees the context's owned output-file string (if any) and destroys its
 * user-data lock.  The context struct itself is not freed; the caller
 * retains ownership of it.
 */
void bfwork_context_destroy(bfwork_context_t *context)
{
	assert(context);

	//Free
	if(context->output_file_str)
		free(context->output_file_str);

	//Destroy locks
	omp_destroy_lock(&context->user_data_lock);
}
/*
 * Release the hash-table storage allocated by the matching init path.
 * Exactly one backend is active per build: OpenCL device buffers, or
 * CPU host arrays with one OpenMP lock per table pointer / reservoir
 * counter.  The shared random table is freed in either case.
 */
void LSHReservoirSampler::unInit() {
#if defined OPENCL_HASHTABLE
	// OpenCL backend: release the device memory objects.
	clReleaseMemObject(_tableMem_obj);
	clReleaseMemObject(_tableMemAllocator_obj);
	clReleaseMemObject(_tablePointers_obj);
	clReleaseMemObject(_globalRand_obj);
#elif defined CPU_TB
	// CPU backend: free the host arrays, then destroy every per-slot
	// lock before freeing the lock arrays themselves.
	delete[] _tableMem;
	delete[] _tablePointers;
	delete[] _tableMemAllocator;
	for (unsigned long long i = 0; i < _tablePointerMax; i++) {
		omp_destroy_lock(_tablePointersLock + i);
	}
	for (unsigned long long i = 0; i < _tableMemReservoirMax; i++) {
		omp_destroy_lock(_tableCountersLock + i);
	}
	delete[] _tablePointersLock;
	delete[] _tableCountersLock;
#endif
	delete[] _global_rand;
}
/*
 * Delete a VampirTrace mutex wrapper.
 * Uses a check / named-critical / re-check sequence (double-checked
 * locking) so that two threads racing to delete the same mutex cannot
 * destroy or free it twice; the pointer is nulled under the critical
 * section.
 */
void VTThrd_deleteMutex(VTThrdMutex** mutex)
{
  if (*mutex == NULL) return;

# pragma omp critical (mutexInitMutex)
  {
    /* Re-check inside the critical section: another thread may have
       deleted it between the check above and lock acquisition. */
    if (*mutex != NULL )
    {
      omp_destroy_lock(&((*mutex)->m));
      free(*mutex);
      *mutex = NULL;
    }
  }
}
/*
 * Driver for the kmp_set_defaults lock-bug regression test: runs the
 * test REPETITIONS times around a single global lock and returns the
 * number of failed repetitions (0 == all passed).
 */
int main()
{
    int num_failed = 0;

    omp_init_lock(&lock);
    for (int rep = 0; rep < REPETITIONS; rep++) {
        if (!test_kmp_set_defaults_lock_bug())
            num_failed++;
    }
    omp_destroy_lock(&lock);

    return num_failed;
}
/*
 * Destructor: release everything the trainer owns, then destroy its lock.
 * Both `delete` and free() are no-ops on NULL, so no guards are needed.
 */
StructuredSVM::~StructuredSVM() {
  delete trainset;
  free(trainfile);
  free(modelfile);
  free(validationfile);
  free(exampleIdsToIndices);
  free(exampleIndicesToIds);
  delete sum_w;
  free(u_i_buff);
  omp_destroy_lock(&my_lock);
}