/*
 * Try to cluster sequence i against every candidate in the list using
 * the d2 distance function, then publish the resulting cluster ids for
 * all candidates under the find_parent lock.
 *
 * work       - per-worker scratch state used by the d2/word-table code
 * i          - index of the query sequence
 * candidates - indices of the sequences to compare against
 * num_cands  - number of entries in candidates
 */
void complete_pairwise_cluster(WorkPtr work, int i, int * candidates, int num_cands) {
  int k;                  /* position in the candidate list              */
  int cand;               /* candidate sequence currently examined       */
  int ri, rj;             /* tentative cluster roots of i and cand       */
  int word_limit;         /* word-count bound derived from cand's length */
  int fwd_hits, rc_hits;  /* heuristic match counts (fwd / rev-comp)     */

  if (clonelink)
    clone_link(work);
  /* Nothing to do for ignored or too-short query sequences. */
  if (IGNORE_SEQ(i))
    return;
  if (seqInfo[i].len < window_len)
    return;

  set_up_word_table(work, i);

  for (k = 0; k < num_cands; k++) {
    cand = candidates[k];
    ri = mini_find_parent(i);
    rj = mini_find_parent(cand);
    assert(i < num_seqs);
    /* Only compare when the pair is not already in the same cluster
       and the two sides are not both fixed clusters. */
    if ((ri != rj) && !(IS_FLAG(FIX, ri) && IS_FLAG(FIX, rj))) {
      NUM_num_matches++;
      word_limit = seqInfo[cand].len / BASESPERELT - 1;
      check_heuristics(i, cand, word_limit, &fwd_hits, &rc_hits);
      /* Forward-strand check: heuristic count, then the full d2 score. */
      if ((fwd_hits >= NM_threshold) && (!IGNORE_SEQ(cand))
          && (d2(work, i, cand, 0) <= theta)) {
        merge_pair(work, ri, i, rj, cand, 0);
      }
      /* Reverse-complement check, when enabled. */
      if (rc_check && (rc_hits >= NM_threshold) && (!IGNORE_SEQ(cand))
          && (d2(work, i, cand, 1) <= theta)) {
        merge_pair(work, ri, i, rj, cand, 1);
      }
    }
  }

  clear_word_table(work, i);

  /* Publish the final cluster id of every candidate under the lock. */
  PLOCK(&find_parent_mutex);
  for (k = 0; k < num_cands; k++)
    tree[candidates[k]].cluster = find_parent(candidates[k]);
  PUNLOCK(&find_parent_mutex);
}
/*
 * One pass of the bottom-up merge sort: walk the chain of sorted
 * sub-lists, merge each consecutive pair into one, and return the
 * resulting (reversed) chain of merged sub-lists.  A trailing sub-list
 * without a partner is carried over unchanged.
 */
static struct list* seq_sort_core(struct list *data) {
	struct list *result = NULL;

	while (data != NULL) {
		struct list *partner = data->next;

		if (partner == NULL) {
			/* Odd sub-list left over: push it onto the result as-is. */
			data->next = result;
			result = data;
			break;
		}

		/* Merge this sub-list with its neighbour into a single run. */
		merge_pair(&data->slist, data->slist, partner->slist);

		/* Push the merged run onto the result chain. */
		data->next = result;
		result = data;

		/* Drop the consumed carrier node and step past the pair. */
		data = partner->next;
		free(partner);
	}

	return result;
}
/* Merge pairs of consecutive leaves in "leaves" taking into account
 * the intersection of validity and proximity schedule constraints "dep".
 *
 * If a leaf has been merged with the next leaf, then the combination
 * is checked again for merging with the next leaf.
 * That is, if the leaves are A, B and C, then B may not have been
 * merged with C, but after merging A and B, it could still be useful
 * to merge the combination AB with C.
 *
 * Two leaves A and B are merged if there are instances of at least
 * one pair of statements, one statement in A and one B, such that
 * the validity and proximity schedule constraints between them
 * make them suitable for merging according to check_merge.
 *
 * Return the final number of leaves in the sequence, or -1 on error.
 */
static int merge_leaves(int n, struct ppcg_grouping_leaf leaves[n],
	__isl_keep isl_union_map *dep)
{
	int i;
	struct ppcg_merge_leaves_data data;

	/* Walk the leaves from the back so that a successful merge only
	 * shifts entries the loop has already passed.
	 */
	for (i = n - 1; i >= 0; --i) {
		isl_union_map *dep_i;
		isl_stat ok;

		/* The last leaf has no successor to merge with. */
		if (i + 1 >= n)
			continue;

		/* Restrict the dependences to those going from instances in
		 * leaf i to instances in leaf i + 1; the copies keep "dep"
		 * and the leaf domains alive for later iterations.
		 */
		dep_i = isl_union_map_copy(dep);
		dep_i = isl_union_map_intersect_domain(dep_i,
			isl_union_set_copy(leaves[i].domain));
		dep_i = isl_union_map_intersect_range(dep_i,
			isl_union_set_copy(leaves[i + 1].domain));
		data.merge = 0;
		data.src = &leaves[i];
		data.dst = &leaves[i + 1];
		ok = isl_union_map_foreach_map(dep_i, &check_merge, &data);
		isl_union_map_free(dep_i);
		/* A negative "ok" with data.merge set presumably means
		 * check_merge aborted the iteration early after deciding to
		 * merge; only a failure without that flag is a real error.
		 * TODO(review): confirm against check_merge's contract.
		 */
		if (ok < 0 && !data.merge)
			return -1;
		if (!data.merge)
			continue;

		/* Combine leaves i and i + 1 in place; the sequence now has
		 * one leaf less, and "++i" cancels the loop's "--i" so that
		 * the merged leaf is reconsidered against its new successor.
		 */
		if (merge_pair(n, leaves, i) < 0)
			return -1;
		--n;
		++i;
	}

	return n;
}
/* CIL-lowered form of seq_sort_core: one pairwise-merge pass over a
 * chain of sorted sub-lists.  All field accesses have been lowered to
 * explicit pointer arithmetic; the "+ 4" offsets presumably address the
 * second (next-pointer) field of struct list on a 32-bit target —
 * TODO(review): confirm the struct layout and pointer size.
 */
static struct list *seq_sort_core(struct list *data )
{ struct list *dst ;
  struct list *next ;
  void *__cil_tmp4 ;
  unsigned int __cil_tmp5 ;
  unsigned int __cil_tmp6 ;
  unsigned int __cil_tmp7 ;
  unsigned int __cil_tmp8 ;
  struct node **__cil_tmp9 ;
  struct node *__cil_tmp10 ;
  struct node *__cil_tmp11 ;
  unsigned int __cil_tmp12 ;
  unsigned int __cil_tmp13 ;
  unsigned int __cil_tmp14 ;
  unsigned int __cil_tmp15 ;
  void *__cil_tmp16 ;

  {
  /* dst = NULL: the chain of already-merged sub-lists, built in reverse. */
#line 53
  __cil_tmp4 = (void *)0;
#line 53
  dst = (struct list *)__cil_tmp4;
  {
#line 55
  while (1) {
    while_1_continue: /* CIL Label */ ;
    /* Loop while there are sub-lists left to process. */
#line 55
    if (data) {

    } else {
      goto while_1_break;
    }
    /* next = data->next (load through byte offset 4). */
#line 56
    __cil_tmp5 = (unsigned int )data;
#line 56
    __cil_tmp6 = __cil_tmp5 + 4;
#line 56
    next = *((struct list **)__cil_tmp6);
#line 57
    if (! next) {
      /* Odd sub-list without a partner: data->next = dst; dst = data;
       * then leave the loop.
       */
#line 59
      __cil_tmp7 = (unsigned int )data;
#line 59
      __cil_tmp8 = __cil_tmp7 + 4;
#line 59
      *((struct list **)__cil_tmp8) = dst;
#line 60
      dst = data;
      goto while_1_break;
    } else {

    }
    {
    /* merge_pair(&data->slist, data->slist, next->slist): merge the two
     * sub-lists into the first carrier node (slist is the first field,
     * so a cast of the node pointer reaches it directly).
     */
#line 65
    __cil_tmp9 = (struct node **)data;
#line 65
    __cil_tmp10 = *((struct node **)data);
#line 65
    __cil_tmp11 = *((struct node **)next);
#line 65
    merge_pair(__cil_tmp9, __cil_tmp10, __cil_tmp11);
    /* data->next = dst; dst = data: push the merged run onto dst. */
#line 66
    __cil_tmp12 = (unsigned int )data;
#line 66
    __cil_tmp13 = __cil_tmp12 + 4;
#line 66
    *((struct list **)__cil_tmp13) = dst;
#line 67
    dst = data;
    /* data = next->next; free(next): drop the consumed carrier node and
     * advance past the processed pair.
     */
#line 70
    __cil_tmp14 = (unsigned int )next;
#line 70
    __cil_tmp15 = __cil_tmp14 + 4;
#line 70
    data = *((struct list **)__cil_tmp15);
#line 71
    __cil_tmp16 = (void *)next;
#line 71
    free(__cil_tmp16);
    }
  }
  while_1_break: /* CIL Label */ ;
  }
#line 74
  return (dst);
}
}
/*
 * Encoded variant of the pairwise-merge pass, operating on integer
 * handles through the TRACK_VALUE array:
 *   TRACK_VALUE[6] - current node handle (starts at "data")
 *   TRACK_VALUE[7] - result chain, built in reverse (starts empty)
 *   TRACK_VALUE[8] - the current node's partner
 * READ_NORMAL(h + 1) / WRITE_NORMAL(h + 1, v) presumably access the
 * next-link slot of the node at handle h — TODO(review): confirm the
 * memory encoding.
 *
 * The original body unrolled the loop seven times; the trailing
 * assume() states that the input chain is exhausted after at most
 * seven merge steps.
 */
unsigned int seq_sort_core(unsigned int data) {
    int round;

    TRACK_VALUE[6] = data;
    TRACK_VALUE[7] = 0;

    for (round = 0; round < 7; ++round) {
        /* Chain exhausted: done. */
        if (!TRACK_VALUE[6]) {
            goto __exit_loop_2;
        }
        /* Fetch the partner node. */
        TRACK_VALUE[8] = READ_NORMAL(TRACK_VALUE[6] + 1);
        if (!TRACK_VALUE[8]) {
            /* Odd node without a partner: push it onto the result as-is. */
            WRITE_NORMAL(TRACK_VALUE[6] + 1, TRACK_VALUE[7]);
            TRACK_VALUE[7] = TRACK_VALUE[6];
            goto __exit_loop_2;
        }
        /* Merge the pair into the current node. */
        merge_pair(TRACK_VALUE[6], READ_NORMAL(TRACK_VALUE[6]),
                   READ_NORMAL(TRACK_VALUE[8]));
        /* Push the merged node onto the result chain. */
        WRITE_NORMAL(TRACK_VALUE[6] + 1, TRACK_VALUE[7]);
        TRACK_VALUE[7] = TRACK_VALUE[6];
        /* Advance past the pair and release the consumed partner. */
        TRACK_VALUE[6] = READ_NORMAL(TRACK_VALUE[8] + 1);
        my_free(TRACK_VALUE[8]);
    }
    /* Unrolling bound: no input needs more than seven merge steps. */
    assume(!TRACK_VALUE[6]);

__exit_loop_2: ;
    return TRACK_VALUE[7];
}