// Performs a union of two sets, does not allow duplicate values struct set_t *set_union(struct set_t *set1, struct set_t *set2) { struct set_t *temp_set_head = NULL; struct set_t *temp_set_it = NULL; struct set_t *set_it = set1; // Add each element of set1 while(set_it != NULL) { if(temp_set_head == NULL) { temp_set_head = new_set(set_it->value); temp_set_it = temp_set_head; } else { temp_set_it->next = set_add(temp_set_head, set_it->value); if(temp_set_it->next != NULL) temp_set_it = temp_set_it->next; } set_it = set_it->next; } // Add each element of set2 set_it = set2; while(set_it != NULL) { if(temp_set_head == NULL) { temp_set_head = new_set(set_it->value); temp_set_it = temp_set_head; } else { temp_set_it->next = set_add(temp_set_head, set_it->value); if(temp_set_it->next != NULL) temp_set_it = temp_set_it->next; } set_it = set_it->next; } return temp_set_head; }
// Exercises union / difference / intersection / optimize on small integer sets.
// NOTE(review): this set API (set_put/set_size) differs from the string-based
// struct set_t API elsewhere in this file; the asserts below imply that
// set_union/set_difference/set_intersection mutate their first argument and
// their return values may be discarded — confirm against their definitions.
void test_set_operations(){
    set_t *even1 = new_set(10);
    set_t *even2 = new_set(10);
    set_t *odd = new_set(10);
    int i;

    // evens 0..18 into even1 and even2, odds 1..19 into odd
    for (i=0; i < 10; i++){
        set_put(even1, 2*i);
        set_put(even2, 2*i);
        set_put(odd, 2*i+1);
    }

    set_union(even1, odd);
    assert(set_size(even1) == 20);      // 10 evens + 10 odds

    set_difference(even2, odd);
    assert(set_size(even2) == 10);      // evens are untouched by removing odds

    set_intersection(even2, odd);
    assert(set_size(even2) == 0);       // evens and odds are disjoint

    set_print(even1); printf("\n");
    set_optimize(even1);
    set_print(even1); printf("\n");
    set_print(even2); printf("\n");
    set_print(odd); printf("\n");

    delete_set(even1);
    delete_set(even2);
    delete_set(odd);
}
/*********************************************************
 * construct LR(1) items.                                *
 * This algorithm merges canonical LR(1) items together  *
 * based on their common cores.                          *
 *-------------------------------------------------------*
 * This is the main LR algorithm used in this program.   *
 * Effectively this is the core of clrgen and these item *
 * sets are used to generate our parse table below.      *
 * NOTE:                                                 *
 * this algorithm's efficiency can be greatly improved   *
 * by unfolding bnf_goto into the main loop and by       *
 * changing the way we compute transitions by preventing *
 * the function from looping through every grammar       *
 * symbol.                                               *
 *********************************************************/
cset* bnf_construct_items(bnf_grammar* bnf)
{
    int i = 0;
    slist_elem* isle = 0;
    cset* items;
    cset* current_set;
    bnf_index* cbi;

    /* validate the argument BEFORE allocating anything so a NULL
       grammar cannot leak the sets that were previously built first */
    if (!bnf)
        return 0;

    items = new_set(compare_bnf_item_sets);
    current_set = new_set(compare_bnf_index);
    cbi = new_bnf_index(0, 1, 0, 0);

    /*********************************************************
     * construct the first item/start state of the item set  *
     *********************************************************/
    /* symbol indices are stored directly as tagged pointers */
    cset_add(cbi->lookaheads, (void*)bnf->start_of_actions);
    cset_add(cbi->lookaheads, (void*)(bnf->start_of_actions + 1));
    cset_add(current_set, (void*)cbi);
    bnf_closure(bnf, cbi, current_set);
    cset_add(items, (void*)current_set);

    /***********************************************************
     * cycle through all symbols and compute their transitions *
     * for the current item by taking a goto closure and       *
     * trying to add it to the core item set.                  *
     * if we can't add then we merge lookaheads with the       *
     * matching state, if any, because we do not want to       *
     * discard generated lookaheads.                           *
     ***********************************************************/
    for (isle = items->members->_head; isle; isle = isle->_next)
        for (i = 0; i < bnf->start_of_actions; ++i) {
            bnf_goto_closure(bnf, (cset*)isle->_data, current_set, i);
            /* BUGFIX: "&& current_set" had been mangled into "&¤t_set"
               by an HTML-entity round trip; the logical AND is restored. */
            if (current_set && current_set->members->_size > 0) {
                if (!cset_add(items, (void*)current_set))
                    bnf_merge_item_lookaheads((cset*)cset_get_first_match(items, current_set), current_set);
                /* re-allocate a new set structure.
                   NOTE: i want to take this out and use a cache-able version
                   because reallocations are unnecessary and affect performance. */
                current_set = new_set(compare_bnf_index);
            }
        }

    /* iterate lookahead propagation to a fixpoint */
    while (propagate_lookaheads(bnf, items))
        ;
    return items;
}
/* Prepare one vertex: record its index, allocate the (zeroed) successor
 * array, and create the NSETS working bit-sets plus the prev set. */
static void init_vertex(vertex_t* v, size_t index, size_t nsymbol, size_t max_succ)
{
    int k;

    v->index = index;

    v->succ = calloc(max_succ, sizeof(vertex_t*));
    if (v->succ == NULL)
        error("out of memory");

    for (k = 0; k < NSETS; k += 1)
        v->set[k] = new_set(nsymbol);

    v->prev = new_set(nsymbol);
}
/*************************************************
 * initialize the global bnf first items array.  *
 * This array mirrors the bnf->lexicon array and *
 * each entry here represents a first items      *
 * entry for the symbol of the same index in the *
 * lexicon array.                                *
 *************************************************/
void initialize_first_array(bnf_grammar* bnf)
{
    int i;

    first_items = (cset**)calloc(bnf->start_of_actions + 1, sizeof(cset*));
    /* BUGFIX: the calloc result was dereferenced unchecked; on allocation
       failure leave first_items NULL rather than invoking undefined
       behavior in the loop below. */
    if (!first_items)
        return;

    for (i = 0; i <= bnf->start_of_actions; ++i)
        first_items[i] = new_set(compare_bnf_lookaheads);
}
/* makes the intersection of two sets: AND together the bit words of
 * l1 and l2 into a freshly allocated set of the given type */
int *intersect_sets(int *l1, int *l2, int type)
{
    int *result;
    int i;

    result = new_set(type);
    for (i = 0; i < set_size(type); i++)
        result[i] = l1[i] & l2[i];

    return result;
}
/* duplicates a set: word-by-word copy into a newly allocated set */
int *dup_set(int *l, int type)
{
    int *copy;
    int i;

    copy = new_set(type);
    for (i = 0; i < set_size(type); i++)
        copy[i] = l[i];

    return copy;
}
/* creates the set {n}, or the empty set if n = -1 */
int *make_set(int n, int type)
{
    int *set = clear_set(new_set(type), type);

    /* n == -1 means "empty": leave all bits clear */
    if (n != -1)
        set[n / mod] = 1 << (n % mod);

    return set;
}
// Relative complement of set1 in set2. Returns a set of elements that exist in set2 but not in set1 struct set_t *set_difference(struct set_t *set1, struct set_t *set2) { struct set_t *temp_set_head = NULL; struct set_t *temp_set_it = NULL; struct set_t *set_it = set2; // Loop through every element in set2 while(set_it != NULL) { // Element must not exist in set1 if(!set_contains(set1, set_it->value)) { if(temp_set_head == NULL) { temp_set_head = new_set(set_it->value); temp_set_it = temp_set_head; } else { temp_set_it->next = set_add(temp_set_head, set_it->value); if(temp_set_it->next != NULL) temp_set_it = temp_set_it->next; } } set_it = set_it->next; } return temp_set_head; }
// Doubles the instance-state storage, migrates every entry into the new
// backing vector, then patches every stored pointer that still refers into
// the old allocation (grid links, depth links, and the depth-list map).
void GameInstSet::reallocate_internal_data() {
    unit_capacity *= 2;
    std::vector<InstanceState> new_set(unit_capacity);

    // Rehash/copy all live entries from the old table into the larger one.
    tset_add_all<GameInstSetFunctions>(&unit_set[0], unit_set.size(), &new_set[0], new_set.size());
    unit_set.swap(new_set);

    //Fix pointers for grid
    for (int i = 0; i < unit_capacity; i++) {
        if (unit_set[i].inst) {
            update_statepointer_for_reallocate_(&unit_set[i].prev_in_grid);
            update_statepointer_for_reallocate_(&unit_set[i].next_in_grid);
            update_statepointer_for_reallocate_(&unit_set[i].prev_same_depth);
            update_statepointer_for_reallocate_(&unit_set[i].next_same_depth);
        }
    }

    // Re-point the per-depth list heads stored in the depth map ...
    DepthMap::iterator it = depthlist_map.begin();
    for (; it != depthlist_map.end(); it++) {
        update_depthlist_for_reallocate_(it->second);
    }
    // ... and the per-cell list heads stored in the spatial grid.
    for (int i = 0; i < grid_w * grid_h; i++) {
        update_depthlist_for_reallocate_(unit_grid[i]);
    }
}
// Performs an intersection of two sets struct set_t *set_intersection(struct set_t *set1, struct set_t *set2) { if(set1 == EMPTY || set2 == EMPTY) return EMPTY; struct set_t *temp_set_head = NULL; struct set_t *temp_set_it = NULL; struct set_t *set_it = set1; // Go through every element of set 1 set_it = set1; while(set_it != NULL) { // See if the elements value is also in set2 if(set_contains(set2, set_it->value)) { if(temp_set_head == NULL) { temp_set_head = new_set(set_it->value); temp_set_it = temp_set_head; } else { // set_add will resolve any attempts to add duplicate elements temp_set_it->next = set_add(temp_set_head, set_it->value); if(temp_set_it->next != NULL) temp_set_it = temp_set_it->next; } } set_it = set_it->next; } return temp_set_head; }
/* Set difference: a new set holding every element of s1 not in s2. */
Set sub_set(Set s1, Set s2)
{
    Set diff = new_set();
    int word;

    for (word = 0; word < SET_SIZE / unit_size; word++)
        diff[word] = s1[word] & ~s2[word];

    return diff;
}
/* Set intersection: a new set holding the elements common to s1 and s2. */
Set intersect_set(Set s1, Set s2)
{
    Set common = new_set();
    int word;

    for (word = 0; word < SET_SIZE / unit_size; word++)
        common[word] = s1[word] & s2[word];

    return common;
}
// Inserting the same key repeatedly must not inflate the set's size:
// 10 distinct keys, each put 10 times, must yield exactly 10 entries.
void test_basic(){
    set_t *set = new_set(10);
    int key, repeat;

    for (key = 0; key < 10; key++)
        for (repeat = 0; repeat < 10; repeat++)
            set_put(set, key);

    assert(set_size(set) == 10);
    delete_set(set);
}
/************************************************************
 * simple constructor code for bnf_index.                   *
 * Copies the supplied lookahead set when one is given,     *
 * otherwise starts with an empty lookahead set.            *
 * Returns 0 on allocation failure.                         *
 ************************************************************/
bnf_index* new_bnf_index(int i, int p, int pp, cset* lookaheads)
{
    bnf_index* bi = (bnf_index*)malloc(sizeof(bnf_index));

    /* BUGFIX: the malloc result was dereferenced unchecked */
    if (!bi)
        return 0;

    bi->item = i;
    bi->product = p;
    bi->product_part = pp;
    /* select the value with the ternary, then assign once (the original
       buried the assignments inside the ternary's branches) */
    bi->lookaheads = lookaheads ? cset_copy(lookaheads)
                                : new_set(compare_bnf_lookaheads);
    return bi;
}
// Fills node_metasets with all node subsets, one size per slot: index 0
// holds the sets of size 1, index k the sets of size k+1, up to n_nodes.
// Same algorithm as before, with the tail recursion turned into a loop.
void build_node_metasets_aux(nodemetaset_t node_metasets[], uint n_nodes, uint index)
{
  for (uint size = index + 1; size <= n_nodes; size++, index++) {
    if (size == 1) {
      // bootstrap: size 1 is seeded with the unitary set of the starting node
      nodeset_t seed;
      seed.insert(0);
      node_metasets[index].insert(seed);
      continue;
    }

    // grow each set of the previous size by one node identifier
    nodemetaset_t& smaller = node_metasets[index - 1];
    for (uint node_i = 0; node_i < n_nodes; node_i++) {
      for (nodemetaset_it sm_it = smaller.begin(); sm_it != smaller.end(); sm_it++) {
        nodeset_t grown((*sm_it));
        grown.insert(node_i);
        // keep the result only when node_i was genuinely new, i.e. the
        // grown set really has the size we are building
        if (grown.size() == size)
          node_metasets[index].insert(grown);
      }
    }
  }
}
// Exercises removal: fill with 0..999, delete the even keys, then
// re-insert them, checking the size after every phase.
void test_removing(){
    set_t *set = new_set(0);
    int key;

    for (key = 0; key < 1000; key++)
        set_put(set, key);
    assert(set_size(set) == 1000);

    set_optimize(set);

    for (key = 0; key < 1000; key += 2)
        set_remove(set, key);
    assert(set_size(set) == 500);

    for (key = 0; key < 1000; key += 2)
        set_put(set, key);
    assert(set_size(set) == 1000);

    delete_set(set);
}
/******************************************
 * returns all bnf_index's within a set   *
 * whose positions are greater than zero. *
 * these are known as the kernels of an   *
 * item set.  The start symbol (item 0 at *
 * position 0) is also kept as a kernel.  *
 ******************************************/
cset* bnf_get_item_kernels(cset* item)
{
    cset* kernels;
    slist_elem* node;

    if (!item || !item->members)
        return 0;

    kernels = new_set(compare_bnf_index);
    for (node = item->members->_head; node; node = node->_next) {
        bnf_index* bi = (bnf_index*)node->_data;
        if (bi->product_part > 0)               /* any product not at 0 is a kernel */
            cset_add(kernels, node->_data);
        else if (bi->item == 0 &&               /* recognise a start symbol as well */
                 bi->product_part == 0)
            cset_add(kernels, node->_data);
    }
    return kernels;
}
// Add an element (val) to a non-empty set if it doesn't already exist in set // Returns the previously added set_t object or NULL if nothing was added struct set_t *set_add(struct set_t *set, const char *val) { if(set == NULL || val == NULL) return NULL; // Find the end of the list struct set_t *set_it = set; struct set_t *set_last = set; while(set_it != NULL) { // Val already exists, return NULL if(strcmp(set_it->value, val) == 0) return NULL; set_last = set_it; set_it = set_it->next; } // Create a new set_t object. Add to the end of the list and copy the value set_last->next = new_set(val); return set_last->next; }
// Fills a set with 0..n-1, verifies the iteration order is sequential,
// then samples random members repeatedly and checks they stay in range.
void test_picking(){
    int n = 10000;
    set_t *set = new_set(0);
    int i;

    for (i = 0; i < n; i++)
        set_put(set, i);
    assert(set_size(set) == n);

    // iteration must visit the keys in insertion order
    set_entry_t *entry = set_head(set);
    for (i = 0; i < n; i++){
        assert(entry->key == i);
        entry = entry->next;
    }

    // every random pick must be one of the inserted keys
    for (i = 0; i < 3*n; i++){
        int picked = set_get_random(set);
        assert(picked < n);
    }

    delete_set(set);
}
/* Allocate a zero-initialized argument environment whose argument
 * set starts empty with an initial capacity of 16 entries. */
lc_arg_env_t *lc_arg_new_env(void)
{
    lc_arg_env_t *const env = XMALLOCZ(lc_arg_env_t);

    env->args = new_set(lc_arg_cmp, 16);
    return env;
}
/* Parses the RDATA of an NSEC3 record (RFC 5155): hash algorithm, flags,
 * iterations, salt, next hashed owner, and the type bitmap.  Returns the
 * stored record, or NULL / a bitch() diagnostic on malformed input. */
static struct rr* nsec3_parse(char *name, long ttl, int type, char *s)
{
    struct rr_nsec3 *rr = getmem(sizeof(*rr));
    struct rr *ret_rr;
    struct binary_data bitmap;
    int i;
    int opt_out = 0;
    char *str_type = NULL;
    int ltype;

    /* hash algorithm: only SHA-1 (value 1) is supported here */
    i = extract_integer(&s, "hash algorithm");
    if (i < 0) return NULL;
    if (i > 255) return bitch("bad hash algorithm value");
    if (i != 1) return bitch("unrecognized or unsupported hash algorithm");
    rr->hash_algorithm = i;

    /* flags: only 0 or 1 (opt-out) are valid */
    i = extract_integer(&s, "flags");
    if (i < 0) return NULL;
    if (i > 255) return bitch("bad flags value");
    if (!(i == 0 || i == 1)) return bitch("unsupported flags value");
    if (i == 1) opt_out = 1;
    rr->flags = i;

    i = extract_integer(&s, "iterations");
    if (i < 0) return NULL;
    if (i > 2500) return bitch("bad iterations value");
    rr->iterations = i;
    /* TODO validate iteration count according to key size,
     * as per http://tools.ietf.org/html/rfc5155#section-10.3 */

    /* salt: a literal "-" means no salt, otherwise hex data up to 255 bytes */
    if (*s == '-') {
        rr->salt.length = 0;
        rr->salt.data = NULL;
        s++;
        if (*s && !isspace(*s) && *s != ';' && *s != ')')
            return bitch("salt is not valid");
        s = skip_white_space(s);
    } else {
        rr->salt = extract_hex_binary_data(&s, "salt", EXTRACT_DONT_EAT_WHITESPACE);
        if (rr->salt.length <= 0) return NULL;
        if (rr->salt.length > 255) return bitch("salt is too long");
    }

    rr->next_hashed_owner = extract_base32hex_binary_data(&s, "next hashed owner");

    /* remaining tokens form the list of covered RR types */
    bitmap = new_set();
    while (s && *s) {
        str_type = extract_label(&s, "type list", "temporary");
        if (!str_type) return NULL;
        ltype = str2rdtype(str_type);
        add_bit_to_set(&bitmap, ltype);
    }
    if (!s) return NULL;
    rr->type_bitmap = compressed_set(&bitmap);

    ret_rr = store_record(type, name, ttl, rr);
    /* record zone-wide NSEC3 presence flags used by later validation */
    if (ret_rr) {
        G.nsec3_present = 1;
        if (opt_out) G.nsec3_opt_out_present = 1;
    }
    return ret_rr;
}
// Worker-thread body for ssytrd_sb2st: optionally pins the thread to a core
// (MAGMA_SETAFFINITY), runs the bulge-chasing reduction, and — when
// compT == 1 — also computes the T factors used later when applying Q2.
static void *magma_ssytrd_sb2st_parallel_section(void *arg)
{
    magma_int_t my_core_id = ((magma_sbulge_id_data*)arg) -> id;
    magma_sbulge_data* data = ((magma_sbulge_id_data*)arg) -> data;
    magma_int_t allcores_num = data -> threads_num;
    magma_int_t n = data -> n;
    magma_int_t nb = data -> nb;
    magma_int_t nbtiles = data -> nbtiles;
    magma_int_t grsiz = data -> grsiz;
    magma_int_t Vblksiz = data -> Vblksiz;
    magma_int_t compT = data -> compT;
    float *A = data -> A;
    magma_int_t lda = data -> lda;
    float *V = data -> V;
    magma_int_t ldv = data -> ldv;
    float *TAU = data -> TAU;
    float *T = data -> T;
    magma_int_t ldt = data -> ldt;
    volatile magma_int_t* prog = data -> prog;
    pthread_barrier_t* barrier = &(data -> barrier);
    //magma_int_t sys_corenbr = 1;

#ifdef ENABLE_TIMER
    real_Double_t timeB=0.0, timeT=0.0;
#endif

    // with MKL and when using omp_set_num_threads instead of mkl_set_num_threads
    // it need that all threads setting it to 1.
    magma_setlapack_numthreads(1);

#ifdef MAGMA_SETAFFINITY
//#define PRINTAFFINITY
#ifdef PRINTAFFINITY
    affinity_set print_set;
    print_set.print_affinity(my_core_id, "starting affinity");
#endif
    affinity_set original_set;
    affinity_set new_set(my_core_id);
    int check = 0;
    int check2 = 0;
    // bind threads: remember the original mask so it can be restored below
    check = original_set.get_affinity();
    if (check == 0) {
        check2 = new_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (single cpu)\n");
    }
    else {
        printf("Error in sched_getaffinity\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "set affinity");
#endif
#endif

    if(compT==1) {
        /* compute the Q1 overlapped with the bulge chasing+T.
         * if all_cores_num=1 it call Q1 on GPU and then bulgechasing.
         * otherwise the first thread run Q1 on GPU and
         * the other threads run the bulgechasing.
         * */
        if(allcores_num==1) {
            //=========================
            //  bulge chasing
            //=========================
#ifdef ENABLE_TIMER
            timeB = magma_wtime();
#endif
            magma_stile_bulge_parallel(0, 1, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, prog);
#ifdef ENABLE_TIMER
            timeB = magma_wtime()-timeB;
            printf(" Finish BULGE timing= %f \n" ,timeB);
#endif
            //=========================
            // compute the T's to be used when applying Q2
            //=========================
#ifdef ENABLE_TIMER
            timeT = magma_wtime();
#endif
            magma_stile_bulge_computeT_parallel(0, 1, V, ldv, TAU, T, ldt, n, nb, Vblksiz);
#ifdef ENABLE_TIMER
            timeT = magma_wtime()-timeT;
            printf(" Finish T's timing= %f \n" ,timeT);
#endif
        }else{ // allcore_num > 1
            magma_int_t id = my_core_id;
            magma_int_t tot = allcores_num;
            //=========================
            //  bulge chasing
            //=========================
#ifdef ENABLE_TIMER
            if(id == 0)
                timeB = magma_wtime();
#endif
            magma_stile_bulge_parallel(id, tot, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, prog);
            pthread_barrier_wait(barrier);
#ifdef ENABLE_TIMER
            if(id == 0){
                timeB = magma_wtime()-timeB;
                printf(" Finish BULGE timing= %f \n" ,timeB);
            }
#endif
            //=========================
            // compute the T's to be used when applying Q2
            //=========================
#ifdef ENABLE_TIMER
            if(id == 0)
                timeT = magma_wtime();
#endif
            magma_stile_bulge_computeT_parallel(id, tot, V, ldv, TAU, T, ldt, n, nb, Vblksiz);
            pthread_barrier_wait(barrier);
#ifdef ENABLE_TIMER
            if (id == 0){
                timeT = magma_wtime()-timeT;
                printf(" Finish T's timing= %f \n" ,timeT);
            }
#endif
        } // allcore == 1
    }else{ // WANTZ = 0
        //=========================
        //  bulge chasing only (no eigenvectors requested)
        //=========================
#ifdef ENABLE_TIMER
        if(my_core_id == 0)
            timeB = magma_wtime();
#endif
        magma_stile_bulge_parallel(my_core_id, allcores_num, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, prog);
        pthread_barrier_wait(barrier);
#ifdef ENABLE_TIMER
        if(my_core_id == 0){
            timeB = magma_wtime()-timeB;
            printf(" Finish BULGE timing= %f \n" ,timeB);
        }
#endif
    } // WANTZ > 0

#ifdef MAGMA_SETAFFINITY
    // unbind threads: restore the affinity mask saved above
    if (check == 0){
        check2 = original_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (restore cpu list)\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "restored_affinity");
#endif
#endif
    return 0;
}
|<product>T(ACTION){printf(\"body:<product>T(ACTION)\\n\");}\ |<body>T(PIPE)<body>{printf(\"body:<body>T(PIPE)<body>\\n\");}\ ;\ product:<part>{printf(\"product:<part>\\n\");}\ |<product><part>{printf(\"product:<product><part>\\n\");}\ ;\ part:T(TOKEN){printf(\"part:T(TOKEN)\\n\");}\ |T(PRODUCT){printf(\"part:T(PRODUCT)\\n\");}\ ;\ */ bool success=false; bnf_parse_item* item=0; static slist* product=new_slist(); static char* temp_val=0; cset* products=new_set(comp_str); cset* tokens=new_set(comp_str); cset* actions=new_set(comp_str); //beginning of stack data structure used by this parser struct pstack_elem{void *_data;pstack_elem *_next;}; struct pcstack{size_t size;pstack_elem* first;}; pcstack* new_pcstack() { pcstack* pcs((pcstack*)malloc(sizeof(pcstack))); memset((void*)pcs,0,sizeof(pcstack)); pcs->size=0; pcs->first=0; return pcs; } void* peek_stack(pcstack* stack)
/* Initialize the global identifier table: a hash set of interned
 * strings plus the obstack that owns their storage. */
void init_ident(void)
{
	/* it's ok to use memcmp here, we check only strings */
	id_set = new_set(memcmp, 128);

	obstack_init(&id_obst);
}
//##################################################################################################
// Worker-thread body for the multi-GPU zapplyQ: thread 0 applies
// V2*Z(:,1:n_gpu) on the GPU(s); every other thread applies its slice of
// V2*Z(:,n_gpu+1:ne) on the CPU.  Threads may be pinned via MAGMA_SETAFFINITY.
static void *magma_zapplyQ_m_parallel_section(void *arg)
{
    magma_int_t my_core_id = ((magma_zapplyQ_m_id_data*)arg) -> id;
    magma_zapplyQ_m_data* data = ((magma_zapplyQ_m_id_data*)arg) -> data;
    magma_int_t ngpu = data -> ngpu;
    magma_int_t allcores_num = data -> threads_num;
    magma_int_t n = data -> n;
    magma_int_t ne = data -> ne;
    magma_int_t n_gpu = data -> n_gpu;
    magma_int_t nb = data -> nb;
    magma_int_t Vblksiz = data -> Vblksiz;
    magmaDoubleComplex *E = data -> E;
    magma_int_t lde = data -> lde;
    magmaDoubleComplex *V = data -> V;
    magma_int_t ldv = data -> ldv;
    magmaDoubleComplex *TAU = data -> TAU;
    magmaDoubleComplex *T = data -> T;
    magma_int_t ldt = data -> ldt;
    pthread_barrier_t* barrier = &(data -> barrier);
    magma_int_t info;

#ifdef ENABLE_TIMER
    real_Double_t timeQcpu=0.0, timeQgpu=0.0;
#endif

    // columns left over for the CPU side after the GPU takes the first n_gpu
    magma_int_t n_cpu = ne - n_gpu;

    // with MKL and when using omp_set_num_threads instead of mkl_set_num_threads
    // it need that all threads setting it to 1.
    magma_set_lapack_numthreads(1);

#ifdef MAGMA_SETAFFINITY
//#define PRINTAFFINITY
#ifdef PRINTAFFINITY
    affinity_set print_set;
    print_set.print_affinity(my_core_id, "starting affinity");
#endif
    affinity_set original_set;
    affinity_set new_set(my_core_id);
    int check = 0;
    int check2 = 0;
    // bind threads: remember the original mask so it can be restored below
    check = original_set.get_affinity();
    if (check == 0) {
        check2 = new_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (single cpu)\n");
    }
    else {
        printf("Error in sched_getaffinity\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "set affinity");
#endif
#endif

    if (my_core_id == 0) {
        //=============================================
        //   on GPU on thread 0:
        //    - apply V2*Z(:,1:N_GPU)
        //=============================================
#ifdef ENABLE_TIMER
        timeQgpu = magma_wtime();
#endif
        magma_zbulge_applyQ_v2_m(ngpu, MagmaLeft, n_gpu, n, nb, Vblksiz, E, lde, V, ldv, T, ldt, &info);
        magma_device_sync();
#ifdef ENABLE_TIMER
        timeQgpu = magma_wtime()-timeQgpu;
        printf(" Finish Q2_GPU GGG timing= %f\n", timeQgpu);
#endif
    } else {
        //=============================================
        //  on CPU on threads 1:allcores_num-1:
        //    - apply V2*Z(:,N_GPU+1:NE)
        //=============================================
#ifdef ENABLE_TIMER
        if (my_core_id == 1)
            timeQcpu = magma_wtime();
#endif
        // split the n_cpu columns evenly across the CPU threads; the last
        // slice is clipped so the pieces cover exactly n_cpu columns
        magma_int_t n_loc = magma_ceildiv(n_cpu, allcores_num-1);
        magmaDoubleComplex* E_loc = E + (n_gpu+ n_loc * (my_core_id-1))*lde;
        n_loc = min(n_loc,n_cpu - n_loc * (my_core_id-1));

        magma_ztile_bulge_applyQ(my_core_id, MagmaLeft, n_loc, n, nb, Vblksiz, E_loc, lde, V, ldv, TAU, T, ldt);
        pthread_barrier_wait(barrier);
#ifdef ENABLE_TIMER
        if (my_core_id == 1) {
            timeQcpu = magma_wtime()-timeQcpu;
            printf(" Finish Q2_CPU CCC timing= %f\n", timeQcpu);
        }
#endif
    } // END if my_core_id

#ifdef MAGMA_SETAFFINITY
    // unbind threads: restore the affinity mask saved above
    if (check == 0) {
        check2 = original_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (restore cpu list)\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "restored_affinity");
#endif
#endif
    return 0;
}
// Computes the set of source files that must be recompiled: starting from
// the files touched since the last compilation, closes over the
// type-dependence graph to a fixpoint so every dependent of a changed or
// trashed type is rescanned and added to the recompilation set.
void Control::ComputeRecompilationSet(TypeDependenceChecker& dependence_checker)
{
    SymbolSet type_trash_set;

    //
    // Find out if any source files has been touched since the last
    // compilation and add all such files to recompilation_file_set.
    //
    FindMoreRecentInputFiles(dependence_checker.file_set);

    //
    // Before messing with the files, compute a list of all the types that
    // have just been compiled. We need to do this here as we will be
    // "Resetting" and "reScanning" some files in the loop below, which in
    // effect removes the set of types to which they were associated in the
    // previous compilation.
    //
    int length_estimate = input_java_file_set.Size(); // problem size estimate
    Tuple<TypeSymbol*> input_types(length_estimate * 2);
    FileSymbol* file_symbol;
    for (file_symbol = (FileSymbol*) input_java_file_set.FirstElement();
         file_symbol;
         file_symbol = (FileSymbol*) input_java_file_set.NextElement())
    {
        for (unsigned i = 0; i < file_symbol -> types.Length(); i++)
            input_types.Next() = file_symbol -> types[i];
    }

    //
    // Declare the closure set, and initialize it with the Union over the
    // closure of the types in the trash_bin. Essentially, we want to catch
    // all "compiled" types in the compilation that has a dependence on these
    // bad types.
    //
    SymbolSet dependents_closure(length_estimate);
    for (unsigned i = 0; i < type_trash_bin.Length(); i++)
    {
        TypeSymbol* type = type_trash_bin[i];
        if (! dependents_closure.IsElement(type))
        {
            if (type -> dependents_closure)
                dependents_closure.Union(*type -> dependents_closure);
            else dependents_closure.AddElement(type);
        }
    }

    //
    // Compute the set of types from the recompilation set that needs to be
    // recompiled and update the recompilation file set.
    //
    SymbolSet new_set(length_estimate), file_seen(length_estimate);
    new_set = recompilation_file_set;
    new_set.Union(expired_file_set);
    file_seen = new_set;
    // How much space do we need for a package declaration? estimate 64 tokens.
    StoragePool* ast_pool = new StoragePool(64);

    //
    // As long as there is a new_set of files to process,...
    //
    do
    {
        //
        // For each file in new_set, compute the reflexive transitive closure
        // of all types contained in that file. Next, reset and rescan the
        // file. If the scan was successful, iterate over the new list of
        // types to see if any of them had already been introduced in the
        // previous compilation via a class file. If so, add all such types to
        // the dependents closure.
        //
        for (FileSymbol* file_symbol = (FileSymbol*) new_set.FirstElement();
             file_symbol;
             file_symbol = (FileSymbol*) new_set.NextElement())
        {
            for (unsigned i = 0; i < file_symbol -> types.Length(); i++)
            {
                TypeSymbol* type = file_symbol -> types[i];
                if (! dependents_closure.IsElement(type))
                {
                    if (type -> dependents_closure)
                        dependents_closure.Union(*type -> dependents_closure);
                    else dependents_closure.AddElement(type);
                }
            }
            if (! expired_file_set.IsElement(file_symbol))
            {
                file_symbol -> Reset();
                file_symbol -> SetJava();
                scanner -> Scan(file_symbol);
                LexStream* lex_stream = file_symbol -> lex_stream;
                if (lex_stream) // did we have a successful scan!
                {
                    AstPackageDeclaration* package_declaration =
                        parser -> PackageHeaderParse(lex_stream, ast_pool);
                    PackageSymbol* package =
                        (package_declaration
                         ? FindOrInsertPackage(lex_stream,
                                               package_declaration -> name)
                         : unnamed_package);
                    ast_pool -> Reset();

                    //
                    // If the file contained more than one type, only the main
                    // one would have been deleted. We now delete the others if
                    // any...
                    //
                    for (unsigned k = 0; k < lex_stream -> NumTypes(); k++)
                    {
                        TokenIndex identifier_token =
                            lex_stream -> Next(lex_stream -> Type(k));
                        NameSymbol* name_symbol =
                            lex_stream -> NameSymbol(identifier_token);
                        if (name_symbol)
                        {
                            TypeSymbol* type =
                                package -> FindTypeSymbol(name_symbol);
                            if (type && (! dependents_closure.IsElement(type)))
                            {
                                if (type -> dependents_closure)
                                    dependents_closure.Union(*type -> dependents_closure);
                                else dependents_closure.AddElement(type);
                            }
                        }
                    }
                }
            }
        }

        //
        // Iterate over the dependents_closure set. For each type T, add it to
        // the trash pile. If the file with which it is associated had not yet
        // been processed, mark it as having been "seen" and add it to the
        // new_set to be considered later. If the file had already been
        // processed but not yet added to the recompilation set, add it to the
        // recompilation set, read it in and if it contains types other than
        // the main one (that had previously been read in via class files) add
        // those new types to the trash pile.
        //
        new_set.SetEmpty();
        TypeSymbol* type;
        for (type = (TypeSymbol*) dependents_closure.FirstElement();
             type;
             type = (TypeSymbol*) dependents_closure.NextElement())
        {
            type_trash_set.AddElement(type);
            FileSymbol* file_symbol = type -> file_symbol;
            if (file_symbol && (! file_seen.IsElement(file_symbol)))
            {
                file_seen.AddElement(file_symbol);
                new_set.AddElement(file_symbol);
                file_symbol -> mtime = 0; // to force a reread of the file.
            }
        }

        //
        // Check that the files in new_set exist, and if so, add them to the
        // recompilation_file_set. Note that if they exist, they will be added
        // because before a file is added to new_set its time stamp is reset
        // to 0. See loop above...
        //
        FindMoreRecentInputFiles(new_set);

        //
        // Empty out the dependents_closure set for the next round.
        //
        dependents_closure.SetEmpty();
    } while (! new_set.IsEmpty());

    delete ast_pool;

    //
    // Clean up the types that were compiled in the previous compilation pass.
    //
    for (unsigned j = 0; j < input_types.Length(); j++)
        input_types[j] -> RemoveCompilationReferences();

    //
    // Reset the closure sets in all the types that were considered in the
    // dependence checker.
    //
    Tuple<TypeSymbol*>& type_list = dependence_checker.TypeList();
    for (unsigned k = 0; k < type_list.Length(); k++)
    {
        TypeSymbol* type = type_list[k];
        type -> index = TypeCycleChecker::OMEGA;
        type -> unit_index = TypeCycleChecker::OMEGA;
        type -> incremental_index = TypeCycleChecker::OMEGA;
        delete type -> dependents_closure;
        type -> dependents_closure = NULL;
    }

    //
    // Remove all dependence edges that are no longer valid.
    //
    RemoveTrashedTypes(type_trash_set);
}
// Worker-thread body for dsytrd_sb2st: optionally pins the thread to one
// core (unless MAGMA_NOAFFINITY), runs the bulge-chasing reduction, and
// (when wantz > 0) computes the T factors needed later to apply Q2.
static void *magma_dsytrd_sb2st_parallel_section(void *arg)
{
    magma_int_t my_core_id = ((magma_dbulge_id_data*)arg) -> id;
    magma_dbulge_data* data = ((magma_dbulge_id_data*)arg) -> data;
    magma_int_t allcores_num = data -> threads_num;
    magma_int_t n = data -> n;
    magma_int_t nb = data -> nb;
    magma_int_t nbtiles = data -> nbtiles;
    magma_int_t grsiz = data -> grsiz;
    magma_int_t Vblksiz = data -> Vblksiz;
    magma_int_t wantz = data -> wantz;
    double *A = data -> A;
    magma_int_t lda = data -> lda;
    double *V = data -> V;
    magma_int_t ldv = data -> ldv;
    double *TAU = data -> TAU;
    double *T = data -> T;
    magma_int_t ldt = data -> ldt;
    volatile magma_int_t* prog = data -> prog;
    pthread_barrier_t* myptbarrier = &(data -> myptbarrier);
    //magma_int_t sys_corenbr = 1;

#ifdef ENABLE_TIMER
    real_Double_t timeB=0.0, timeT=0.0;
#endif

    // with MKL and when using omp_set_num_threads instead of mkl_set_num_threads
    // it need that all threads setting it to 1.
    //magma_set_omp_numthreads(1);
    magma_set_lapack_numthreads(1);
    magma_set_omp_numthreads(1);
/*
#ifndef MAGMA_NOAFFINITY
    // bind threads
    cpu_set_t set;
    // bind threads
    CPU_ZERO( &set );
    CPU_SET( my_core_id, &set );
    sched_setaffinity( 0, sizeof(set), &set);
#endif
    magma_set_lapack_numthreads(1);
    magma_set_omp_numthreads(1);
*/

#ifndef MAGMA_NOAFFINITY
//#define PRINTAFFINITY
#ifdef PRINTAFFINITY
    affinity_set print_set;
    print_set.print_affinity(my_core_id, "starting affinity");
#endif
    affinity_set original_set;
    affinity_set new_set(my_core_id);
    magma_int_t check = 0;
    magma_int_t check2 = 0;
    // bind threads: remember the original mask so it can be restored below
    check = original_set.get_affinity();
    if (check == 0) {
        check2 = new_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (single cpu)\n");
    }
    else {
        printf("Error in sched_getaffinity\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "set affinity");
#endif
#endif

    /* compute the Q1 overlapped with the bulge chasing+T.
     * if all_cores_num=1 it call Q1 on GPU and then bulgechasing.
     * otherwise the first thread run Q1 on GPU and
     * the other threads run the bulgechasing.
     * */
    //=========================
    //  bulge chasing
    //=========================
#ifdef ENABLE_TIMER
    if (my_core_id == 0)
        timeB = magma_wtime();
#endif

    magma_dtile_bulge_parallel(my_core_id, allcores_num, A, lda, V, ldv, TAU, n, nb, nbtiles, grsiz, Vblksiz, wantz, prog, myptbarrier);
    if (allcores_num > 1) pthread_barrier_wait(myptbarrier);

#ifdef ENABLE_TIMER
    if (my_core_id == 0) {
        timeB = magma_wtime()-timeB;
        printf(" Finish BULGE timing= %f\n", timeB);
    }
#endif

    //=========================
    // compute the T's to be used when applying Q2
    //=========================
    if ( wantz > 0 ) {
#ifdef ENABLE_TIMER
        if (my_core_id == 0)
            timeT = magma_wtime();
#endif
        magma_dtile_bulge_computeT_parallel(my_core_id, allcores_num, V, ldv, TAU, T, ldt, n, nb, Vblksiz);
        if (allcores_num > 1) pthread_barrier_wait(myptbarrier);
#ifdef ENABLE_TIMER
        if (my_core_id == 0) {
            timeT = magma_wtime()-timeT;
            printf(" Finish T's timing= %f\n", timeT);
        }
#endif
    }

#ifndef MAGMA_NOAFFINITY
    // unbind threads: restore the affinity mask saved above
    if (check == 0) {
        check2 = original_set.set_affinity();
        if (check2 != 0)
            printf("Error in sched_setaffinity (restore cpu list)\n");
    }
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "restored_affinity");
#endif
#endif
    return 0;
}
// Validates the arguments of a standard-macro invocation before evaluation.
// Returns NULL when the arguments are well-formed, otherwise a freshly
// allocated EvalError describing the problem (the caller owns it).
static struct EvalError *check_stdmacro(
        enum StandardMacro stdmacro, struct Expression *args, size_t n) {
    size_t length;
    struct Expression expr;
    struct Set *set;

    switch (stdmacro) {
    case F_DEFINE:
    case F_SET:
        // the binding target must be a symbol
        if (args[0].type != E_SYMBOL) {
            return new_eval_error_expr(ERR_TYPE_VAR, args[0]);
        }
        break;
    case F_LAMBDA:
        // the parameter list may be (), a proper/dotted list of symbols,
        // or a single rest-symbol; every symbol must be distinct
        expr = args[0];
        if (expr.type != E_NULL && expr.type != E_PAIR
                && expr.type != E_SYMBOL) {
            return new_syntax_error(expr);
        }
        set = new_set();
        while (expr.type != E_NULL) {
            InternId symbol_id;
            if (expr.type == E_PAIR) {
                if (expr.box->car.type != E_SYMBOL) {
                    free_set(set);
                    return new_eval_error_expr(ERR_TYPE_VAR, expr.box->car);
                }
                symbol_id = expr.box->car.symbol_id;
            } else if (expr.type == E_SYMBOL) {
                // dotted tail: a rest parameter terminates the list
                symbol_id = expr.symbol_id;
            } else {
                free_set(set);
                return new_eval_error_expr(ERR_TYPE_VAR, expr);
            }
            // the set detects duplicate parameter names
            if (!add_to_set(set, symbol_id)) {
                free_set(set);
                return attach_code(
                        new_eval_error_symbol(ERR_DUP_PARAM, symbol_id),
                        args[0]);
            }
            if (expr.type == E_SYMBOL) {
                break;
            }
            expr = expr.box->cdr;
        }
        free_set(set);
        break;
    case F_UNQUOTE:
    case F_UNQUOTE_SPLICING:
        // unquote outside quasiquote is always an error
        return new_eval_error(ERR_UNQUOTE);
    case F_COND:
        // every clause must be a list of at least two elements
        for (size_t i = 0; i < n; i++) {
            if (!count_list(&length, args[i]) || length < 2) {
                return new_syntax_error(args[i]);
            }
        }
        break;
    case F_LET:
    case F_LET_STAR:
        // each binding must be a (symbol value) pair with distinct symbols
        expr = args[0];
        set = new_set();
        while (expr.type != E_NULL) {
            if (expr.type == E_PAIR) {
                if (!count_list(&length, expr.box->car) || length != 2) {
                    free_set(set);
                    return new_syntax_error(expr.box->car);
                }
                if (expr.box->car.box->car.type != E_SYMBOL) {
                    free_set(set);
                    return new_eval_error_expr(
                            ERR_TYPE_VAR, expr.box->car.box->car);
                }
            } else {
                free_set(set);
                return new_syntax_error(args[0]);
            }
            InternId symbol_id = expr.box->car.box->car.symbol_id;
            if (!add_to_set(set, symbol_id)) {
                free_set(set);
                return attach_code(
                        new_eval_error_symbol(ERR_DUP_PARAM, symbol_id),
                        args[0]);
            }
            expr = expr.box->cdr;
        }
        free_set(set);
        break;
    default:
        break;
    }
    return NULL;
}