/* Return a pruned copy of u: entries smaller than a quarter of the vector
 * centre (sum of squares divided by sum) are removed.  The reduction in
 * entry count is reported on stderr.
*/

mclv* reduce_v
(  const mclv* u
)
   {  mclv* v   =  mclvClone(u)
   ;  dim n     =  v->n_ivps
   ;  double s  =  mclvSum(v)
   ;  double sq =  mclvPowSum(v, 2.0)

   ;  if (s)
      mclvSelectGqBar(v, 0.25 * sq / s)

   ;  fprintf(stderr, "from %d to %d entries\n", (int) n, (int) v->n_ivps)
   ;  return v
;  }
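/* A minimal usage sketch, not part of the original code.  It only uses the
 * vector calls already present in this file (mclvInit, mclvInsertIdx,
 * mclvFree); the helper name example_reduce_v is hypothetical.  With entries
 * 1, 4, 16 the centre is (1+16+256)/21 = 13, so the bar is 3.25 and only the
 * entries 4 and 16 are expected to survive.
*/

static void example_reduce_v
(  void
)
   {  mclv* u = mclvInit(NULL)
   ;  mclv* v = NULL
   ;  mclvInsertIdx(u, 0, 1.0)
   ;  mclvInsertIdx(u, 1, 4.0)
   ;  mclvInsertIdx(u, 2, 16.0)
   ;  v = reduce_v(u)          /* expected to report "from 3 to 2 entries" */
   ;  mclvFree(&u)
   ;  mclvFree(&v)
;  }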
/* Accumulate, for every index in vec, the column of mx at that offset;
 * entries that stay zero are removed afterwards by the fltxCopy unary pass.
 * Canonical domains are assumed, i.e. vector ids coincide with offsets.
*/

static mclv* matrix_vector
(  const mclx* mx
,  const mclv* vec
)
   {  mclv* res = mclvClone(mx->dom_rows)
   ;  dim i, j
   ;  mclvMakeConstant(res, 0.0)

   ;  for (i=0;i<vec->n_ivps;i++)
      {  mclv* c = mx->cols + vec->ivps[i].idx
      ;  for (j=0;j<c->n_ivps;j++)
         res->ivps[c->ivps[j].idx].val += 1.0
   ;  }
      mclvUnary(res, fltxCopy, NULL)
   ;  return res
;  }
/* Obtain the base matrix for comparison.  With b2opts the mcl machinery is
 * run for a single expansion step (inflation 1.0) and the expanded matrix is
 * taken; otherwise the start matrix is taken and its column sums are kept in
 * start_col_sums_g.
*/

static mclx* get_base
(  const char* fn_input
,  mclx* mx_input
,  const char* b1opts
,  const char* b2opts
)
   {  mclAlgParam* mlp  =  NULL
   ;  mcxstatus status  =  STATUS_FAIL
   ;  mclx* mxbase      =  NULL

   ;  get_interface
      (  &mlp
      ,  fn_input
      ,  b2opts ? b2opts : b1opts
      ,  NULL
      ,  mx_input
      ,  b2opts ? ALG_CACHE_EXPANDED : ALG_CACHE_START
      ,  EXIT_ON_FAIL
      )

   ;  if (b2opts)
      {  mlp->mpp->mainLoopLength = 1
      ;  mlp->mpp->mainInflation  = 1.0
      ;  mlp->expand_only         = TRUE

      ;  mcxLog(MCX_LOG_APP, me, "computing expanded matrix")
      ;  if ((status = mclAlgorithm(mlp)) == STATUS_FAIL)
         {  mcxErr(me, "failed")
         ;  mcxExit(1)
      ;  }
         mcxLog(MCX_LOG_APP, me, "done computing expanded matrix")
      ;  mxbase = mclAlgParamRelease(mlp, mlp->mx_expanded)
   ;  }
      else
      {  if (mclAlgorithmStart(mlp, FALSE))
         mcxDie(1, me, "-b1 option start-up failed")
      ;  mxbase = mclAlgParamRelease(mlp, mlp->mx_start)
      ;  start_col_sums_g = mclvClone(mlp->mx_start_sums)
   ;  }

      mclAlgParamFree(&mlp, TRUE)
   ;  return mxbase
;  }
/* Starting from all nodes, repeatedly replace the current set by the set of
 * nodes reachable from it in one step (matrix_vector) until the set no
 * longer shrinks.  One dot is printed per iteration.
*/

static mclv* run_through
(  const mclx* mx
)
   {  mclv* starts = mclvClone(mx->dom_cols)
   ;  dim n_starts_previous = starts->n_ivps + 1
   ;  dim n_steps = 0

   ;  while (n_starts_previous > starts->n_ivps)
      {  mclv* new_starts = matrix_vector(mx, starts)
      ;  mclvMakeCharacteristic(new_starts)
      ;  n_starts_previous = starts->n_ivps
      ;  mclvFree(&starts)
      ;  starts = new_starts
      ;  n_steps++
      ;  fputc('.', stderr)
   ;  }
      if (n_steps)
      fputc('\n', stderr)
   ;  return starts
;  }
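/* Hypothetical driver, a sketch only.  It assumes the usual mcl I/O entry
 * points (mcxIOnew, mclxRead, mcxIOfree) from the library headers this file
 * already depends on, a placeholder file name graph.mci, and a graph with
 * canonical domains as required by matrix_vector above.
*/

static void example_run_through
(  void
)
   {  mcxIO* xf  = mcxIOnew("graph.mci", "r")
   ;  mclx* mx   = mclxRead(xf, EXIT_ON_FAIL)
   ;  mclv* core = run_through(mx)
   ;  fprintf
      (  stderr
      ,  "%lu of %lu nodes remain\n"
      ,  (ulong) core->n_ivps
      ,  (ulong) N_COLS(mx)
      )
   ;  mclvFree(&core)
   ;  mclxFree(&mx)
   ;  mcxIOfree(&xf)
;  }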
/* Build a canonical vector of nr entries (indices 0..nr-1) all set to val,
 * then overwrite the positions present in src with the values from src.
 * The case dst == src is handled by cloning src first.
*/

mclVector* mclvCanonicalEmbed
(  mclv* dst
,  const mclv* src
,  dim nr
,  double val
)
   {  mclIvp* ivp
   ;  dim d = 0
   ;  mclv* src_clone = NULL

   ;  if (dst == src)
         src_clone = mclvClone(src)
      ,  src = src_clone

   ;  dst = mclvResize(dst, nr)

                        /* set everything to val */
   ;  ivp = dst->ivps
   ;  while (ivp < dst->ivps+dst->n_ivps)
      {  ivp->idx = d++
      ;  (ivp++)->val = val
   ;  }

                        /* insert src values */
                        /* fixme: use better implementation,
                         * preferably with a callback
                        */
      ivp = dst->ivps
   ;  for (d=0;d<src->n_ivps;d++)
      {  ivp = mclvGetIvp(dst, src->ivps[d].idx, ivp)
      ;  if (ivp)
         ivp->val = src->ivps[d].val
   ;  }
      if (src_clone)
      mclvFree(&src_clone)
   ;  return dst
;  }
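/* Usage sketch, not from the original code: embed a one-entry vector into a
 * canonical domain of ten entries that default to 0.0.  The helper name
 * example_embed is hypothetical; the vector calls are the ones used above.
*/

static void example_embed
(  void
)
   {  mclv* src = mclvInsertIdx(NULL, 3, 7.0)
   ;  mclv* dst = mclvCanonicalEmbed(mclvInit(NULL), src, 10, 0.0)
                     /* dst now has indices 0..9, value 7.0 at index 3,
                      * value 0.0 everywhere else
                     */
   ;  mclvFree(&src)
   ;  mclvFree(&dst)
;  }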
/* Breadth-first expansion from the node at offset i, one level per call to
 * fire_node_next.  Returns the number of expansion rounds, or -1 if
 * fire_node_next reported failure; if seenpp is non-NULL the set of nodes
 * reached is handed back through it.
*/

ofs fire_node
(  const mclx* mx
,  dim i
,  mclv** seenpp
)
   {  mclv* v     =  mx->cols+i
   ;  mclv* seen  =  mclvInsertIdx(NULL, v->vid, 1.0)
   ;  mclv* todo  =  mclvClone(v)
   ;  mcxstatus s =  STATUS_OK
   ;  dim level   =  0

   ;  if (0)
      fprintf(stderr, "node %d\n", (int) i)

   ;  while (todo->n_ivps && !s)
         s = fire_node_next(mx, seen, todo, i)
      ,  level++

   ;  mclvFree(&todo)

   ;  if (seenpp)
      seenpp[0] = seen
   ;  else
      mclvFree(&seen)

   ;  return s ? -1 : level
;  }
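/* Sketch of a possible caller, not present in the original code: take the
 * largest number of expansion rounds over all start nodes.  The helper name
 * example_max_level is hypothetical; it assumes canonical domains, since
 * fire_node addresses columns by offset.
*/

static ofs example_max_level
(  const mclx* mx
)
   {  ofs longest = 0
   ;  dim i
   ;  for (i=0;i<N_COLS(mx);i++)
      {  ofs lev = fire_node(mx, i, NULL)
      ;  if (lev > longest)
         longest = lev
   ;  }
      return longest
;  }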
void pairwise_setops
(  mclx* mx1
,  mclx* mx2
,  mcxbits modes
)
   {  dim t, u, n_tst = 0
   ;  mclv* cache =  mclvInit(NULL)
   ;  mclv* meet  =  mclvInit(NULL)
   ;  mclv* join  =  mclvInit(NULL)
   ;  mclv* diff  =  mclvInit(NULL)
   ;  mcxbool overwrite = modes & MMM_OVERWRITE
   ;  dim n_zero_meet = 0, n_plus_meet = 0

   ;  mclv* (*fn_meet)(const mclv* lft, const mclv* rgt, mclv* dst)  = mcldMeet
   ;  mclv* (*fn_minus)(const mclv* lft, const mclv* rgt, mclv* dst) = mcldMinus1

   ;  if (modes & MMM_MEET2)
         fn_meet  = mcldMeet2
      ,  fn_minus = mcldMinus

                  /* the point of overwrite is to have
                   * a lft == dst or rgt == dst pattern.
                  */
   ;  for (t=0;t<N_COLS(mx1);t++)
      {  for (u=0;u<N_COLS(mx2);u++)
         {  mclv* dst
            =  overwrite
               ?  (modes & MMM_RIGHT ? mx1->cols+u : mx2->cols+t)
               :  diff

         ;  if (overwrite)
            mclvCopy(cache, dst)          /* cache column, reinstate later */

         ;  if (modes & MMM_BINARY)
            mclvBinary(mx1->cols+t, mx2->cols+u, dst, fltLaNR)
         ;  else
            fn_minus(mx1->cols+t, mx2->cols+u, dst)       /* compute t / u */

         ;  if (overwrite)
               mclvCopy(diff, dst)        /* reinstate column    */
            ,  mclvCopy(dst, cache)       /* diff contains t / u */

         ;  dst = overwrite ? dst : meet  /* cache column, same as above */

         ;  if (modes & MMM_BINARY)
            mclvBinary(mx1->cols+t, mx2->cols+u, dst, fltLaR)
         ;  else
            fn_meet(mx1->cols+t, mx2->cols+u, dst)

         ;  if (overwrite)
               mclvCopy(meet, dst)
            ,  mclvCopy(dst, cache)       /* meet contains t /\ u */

         ;  mcldMerge(diff, meet, join)   /* join should be identical to column t */

         ;  if (meet->n_ivps)
            n_plus_meet++
         ;  else
            n_zero_meet++

         ;  if (modes & MMM_CHECK)
            {  mclv* dediff = mclvClone(mx1->cols+t)
            ;  mclv* demeet = mclvClone(mx1->cols+t)
            ;  dim nd = mclvUpdateMeet(dediff, diff, fltSubtract)
            ;  dim nm = mclvUpdateMeet(demeet, meet, fltSubtract)

            ;  if
               (  diff->n_ivps + meet->n_ivps != mx1->cols[t].n_ivps
               || !mcldEquate(join, mx1->cols+t, MCLD_EQT_EQUAL)
               || diff->n_ivps != nd
               || meet->n_ivps != nm
               )
               {  mclvaDump(mx1->cols+t, stdout, -1, " ", MCLVA_DUMP_HEADER_ON)
               ;  mclvaDump(mx2->cols+u, stdout, -1, " ", MCLVA_DUMP_HEADER_ON)
               ;  mclvaDump(meet, stdout, -1, " ", MCLVA_DUMP_HEADER_ON)
               ;  mclvaDump(diff, stdout, -1, " ", MCLVA_DUMP_HEADER_ON)
               ;  mcxDie(1, me, "rats")
            ;  }
               mclvFree(&dediff)
            ;  mclvFree(&demeet)
         ;  }
            n_tst++
      ;  }
      }

      fprintf
      (  stdout
      ,  "meet was nonempty %.2f\n"
      ,  (double) (n_plus_meet * 1.0f / n_tst)
      )
   ;  fprintf
      (  stdout
      ,  "%d successful tests in %s%s %s mode (checked: %s)\n"
      ,  (int) n_tst
      ,  overwrite ? "overwrite" : "create"
      ,  overwrite ? ( modes & MMM_RIGHT ? "-right" : "-left" ) : ""
      ,  modes & MMM_BINARY ? "generic" : "update"
      ,  (modes & MMM_CHECK ? "yes" : "no")
      )
   ;  fprintf
      (  stdout
      ,  "meet-can: %10lu\n"
         "meet-zip: %10lu\n"
         "meet-s/l: %10lu\n"
         "diff-can: %10lu\n"
         "diff-zip: %10lu\n"
         "diff-s/l: %10lu\n"
      ,  (ulong) nu_meet_can
      ,  (ulong) nu_meet_zip
      ,  (ulong) nu_meet_sl
      ,  (ulong) nu_diff_can
      ,  (ulong) nu_diff_zip
      ,  (ulong) nu_diff_sl
      )

   ;  mclvFree(&cache)
   ;  mclvFree(&meet)
   ;  mclvFree(&join)
   ;  mclvFree(&diff)
;  }
/* Build a matrix from the parsed columns (pars) accumulated by the stream
 * reader.  Column and row domains are taken from read-only tabs when
 * present, otherwise canonical domains sized by the largest index seen
 * (possibly enlarged by the 123/235 maxima).
*/

static mclx* make_mx_from_pars
(  mclxIOstreamer* streamer
,  stream_state* iface
,  void (*ivpmerge)(void* ivp1, const void* ivp2)
,  mcxbits bits
)
   {  mclpAR* pars      =  iface->pars
   ;  long dc_max_seen  =  iface->map_c->max_seen
   ;  long dr_max_seen  =  iface->map_r->max_seen
   ;  mclx* mx          =  NULL
   ;  mclv* domc, *domr
   ;  dim i

   ;  if (bits & MCLXIO_STREAM_235ANY)
      {  if (streamer->cmax_235 > 0 && dc_max_seen < streamer->cmax_235 - 1)
         dc_max_seen = streamer->cmax_235-1
   ;  }
      else if (bits & MCLXIO_STREAM_123)
      {  if (streamer->cmax_123 > 0 && dc_max_seen < streamer->cmax_123 - 1)
         dc_max_seen = streamer->cmax_123-1
      ;  if (streamer->rmax_123 > 0 && dr_max_seen < streamer->rmax_123 - 1)
         dr_max_seen = streamer->rmax_123-1
   ;  }

      mcxTell("stream", "maxc=%d maxr=%d", (int) dc_max_seen, (int) dr_max_seen)

   ;  if (iface->pars_n_used != iface->map_c->max_seen+1)
      mcxDie
      (  1
      ,  module
      ,  "internal discrepancy: n_pars=%lu max_seen+1=%lu"
      ,  (ulong) iface->pars_n_used
      ,  (ulong) (iface->map_c->max_seen+1)
      )

   ;  if (dc_max_seen < 0 || dr_max_seen < 0)
      {  if (dc_max_seen < -1 || dr_max_seen < -1)
         {  mcxErr(module, "bad apples %ld %ld", dc_max_seen, dr_max_seen)
         ;  return NULL
      ;  }
         else
         mcxTell(module, "no assignments yield void/empty matrix")
   ;  }

                  /* fixme: with extend and same tab, should still copy.
                   * then, there are still occasions where one would
                   * want to go the sparse route
                  */
      domc
      =  iface->map_c->tab && (iface->bits & MCLXIO_STREAM_CTAB_RO)
         ?  mclvClone(iface->map_c->tab->domain)
         :  mclvCanonical(NULL, dc_max_seen+1, 1.0)
   ;  domr
      =  iface->map_r->tab && (iface->bits & MCLXIO_STREAM_RTAB_RO)
         ?  mclvClone(iface->map_r->tab->domain)
         :  mclvCanonical(NULL, dr_max_seen+1, 1.0)

   ;  if (! (mx = mclxAllocZero(domc, domr)))
      {  mclvFree(&domc)
      ;  mclvFree(&domr)
   ;  }
      else
      for (i=0;i<iface->pars_n_used;i++)        /* careful with signedness */
      {  long d = domc->ivps[i].idx
      ;  if (DEBUG3)
         fprintf(stderr, "column %d alloc %d\n", (int) d, (int) iface->pars_n_alloc)
      ;  mclvFromPAR(mx->cols+i, pars+d, 0, ivpmerge, NULL)
   ;  }
      return mx
;  }
/* Score the similarity of columns c and d.  Entries in the meet are
 * weighted by the relative overlap of the [start, end] intervals attached
 * to c and d; the score is the product of the minimum mean weighted meet
 * value, the cosine between the two weighted meets, a euclidean mass ratio,
 * and the fraction of c's entries in the meet.  One line of per-pair
 * diagnostics is written to stdout.
*/

double get_score
(  const mclv* c
,  const mclv* d
,  const mclv* c_start
,  const mclv* d_start
,  const mclv* c_end
,  const mclv* d_end
)
   {  mclv* vecc   =  mclvClone(c)
   ;  mclv* vecd   =  mclvClone(d)
   ;  mclv* meet_c =  mcldMeet(vecc, vecd, NULL)
   ;  mclv* meet_d =  mcldMeet(vecd, meet_c, NULL)
   ;  mclv* cwid   =  mclvBinary(c_end, c_start, NULL, fltSubtract)
   ;  mclv* dwid   =  mclvBinary(d_end, d_start, NULL, fltSubtract)
   ;  mclv* rmin   =  mclvBinary(c_end, d_end, NULL, fltMin)
   ;  mclv* lmax   =  mclvBinary(c_start, d_start, NULL, fltMax)
   ;  mclv* delta  =  mclvBinary(rmin, lmax, NULL, fltSubtract)
   ;  mclv* weightc, *weightd
   ;  double ip, cd, csn, meanc, meand, mean, euclid, meet_fraction, score
   ;  double sum_meet_c, sum_meet_d, reduction_c, reduction_d
   ;  int nmeet = meet_c->n_ivps
   ;  int nldif = vecc->n_ivps - nmeet
   ;  int nrdif = vecd->n_ivps - nmeet

   ;  mclvSelectGqBar(delta, 0.0)

   ;  weightc = mclvBinary(delta, cwid, NULL, mydiv)
   ;  weightd = mclvBinary(delta, dwid, NULL, mydiv)

#if 0
   ;  if (c != d)
         mclvaDump(cwid, stdout, 5, "\n", 0)
      ,  mclvaDump(dwid, stdout, 5, "\n", 0)
#endif

   ;  sum_meet_c = 0.01 + mclvSum(meet_c)
   ;  sum_meet_d = 0.01 + mclvSum(meet_d)

   ;  mclvBinary(meet_c, weightc, meet_c, fltMultiply)
   ;  mclvBinary(meet_d, weightd, meet_d, fltMultiply)

   ;  reduction_c = mclvSum(meet_c) / sum_meet_c
   ;  reduction_d = mclvSum(meet_d) / sum_meet_d

   ;  ip  = mclvIn(meet_c, meet_d)
   ;  cd  = sqrt(mclvPowSum(meet_c, 2.0) * mclvPowSum(meet_d, 2.0))
   ;  csn = cd ? ip / cd : 0.0

   ;  meanc = meet_c->n_ivps ? mclvSum(meet_c) / meet_c->n_ivps : 0.0
   ;  meand = meet_d->n_ivps ? mclvSum(meet_d) / meet_d->n_ivps : 0.0
   ;  mean  = MCX_MIN(meanc, meand)

   ;  euclid
      =  0
         ?  1.0
         :  (  mean
            ?  sqrt(mclvPowSum(meet_c, 2.0) / mclvPowSum(vecc, 2.0))
            :  0.0
            )
   ;  meet_fraction = pow((meet_c->n_ivps * 1.0 / vecc->n_ivps), 1.0)

   ;  score = mean * csn * euclid * meet_fraction * 1.0

   ;  mclvFree(&meet_c)
   ;  mclvFree(&meet_d)

   ;  fprintf
      (  stdout
      ,  "%10d%10d%10d%10d%10d%10g%10g%10g%10g%10g%10g%10g\n"
      ,  (int) c->vid
      ,  (int) d->vid
      ,  (int) nldif
      ,  (int) nrdif
      ,  (int) nmeet
      ,  score
      ,  mean
      ,  csn
      ,  euclid
      ,  meet_fraction
      ,  reduction_c
      ,  reduction_d
      )
   ;  return score
;  }
/* clmAssimilate: working through clusters from small to large, a cluster of
 * at most prune_sz entries (prune_sz == 0 means no size restriction) is
 * absorbed into the higher-ranking cluster that captures most of its mass in
 * the cluster-on-cluster projection.  Returns the number of entries moved;
 * sink and source cluster counts are returned via n_sink and n_source.
*/

static dim clm_clm_prune
(  mclx* mx
,  mclx* cl
,  dim prune_sz
,  mclx** cl_adjustedpp
,  dim* n_sink
,  dim* n_source
)
   {  dim d, n_adjusted = 0
   ;  mclx* cl_adj = mclxCopy(cl)
   ;  mclv* cid_affected = mclvClone(cl->dom_cols)
   ;  const char* me = "clmAssimilate"
   ;  double bar_affected = 1.5
   ;  mclx *el_to_cl = NULL
   ;  mclx *el_on_cl = NULL
   ;  mclx *cl_on_cl = NULL
   ;  mclx *cl_on_el = NULL

   ;  *n_sink = 0
   ;  *n_source = 0

   ;  mclvMakeConstant(cid_affected, 1.0)
   ;  mclxColumnsRealign(cl_adj, mclvSizeCmp)
   ;  *cl_adjustedpp = NULL

   ;  clmCastActors
      (&mx, &cl_adj, &el_to_cl, &el_on_cl, &cl_on_cl, &cl_on_el, 0.95)
   ;  mclxFree(&cl_on_el)

   ;  for (d=0;d<N_COLS(cl_on_cl);d++)
      {  mclv* clthis  =  cl_adj->cols+d
      ;  mclv* cllist  =  cl_on_cl->cols+d
      ;  mclp* pself   =  mclvGetIvp(cllist, clthis->vid, NULL)
      ;  double self_val = -1.0

      ;  if (pself)
            self_val = pself->val
         ,  pself->val *= 1.001    /* to push it up in case of equal weights */

      ;  if (0)
         fprintf(stderr, "test size %d\n", (int) clthis->n_ivps)

      ;  if (prune_sz && clthis->n_ivps > prune_sz)
         continue

      ;  while (1)
         {  mclv* clthat
         ;  dim e

         ;  if (cllist->n_ivps < 2)
            break

         ;  mclvSort(cllist, mclpValRevCmp)

               /* now get biggest mass provided that cluster
                * ranks higher (has at least as many entries)
                *
                * fixme/todo: we probably have a slight order
                * dependency for some fringe cases. If provable
                * then either solve or document it.
               */
         ;  for (e=0;e<cllist->n_ivps;e++)
            if (cllist->ivps[e].idx >= clthis->vid)
            break
                                       /* found none or itself */
         ;  if (e == cllist->n_ivps || cllist->ivps[e].idx == clthis->vid)
            break

         ;  if /* Should Not Happen */
            (! (  clthat
               =  mclxGetVector(cl_adj, cllist->ivps[e].idx, RETURN_ON_FAIL, NULL)
            )  )
            break

               /* works for special case prune_sz == 0             */
               /* if (clthat->n_ivps + clthis->n_ivps > prune_sz)  */
               /*    ^iced. inconsistent behaviour as k grows.     */

         ;  {  mcxLog
               (  MCX_LOG_LIST
               ,  me
               ,  "source %ld|%lu|%.3f absorbed by %ld|%lu|%.3f"
               ,  clthis->vid, (ulong) clthis->n_ivps, self_val
               ,  clthat->vid, (ulong) clthat->n_ivps, cllist->ivps[0].val
               )
            ;  n_adjusted += clthis->n_ivps
            ;  (*n_sink)++

               /* note: we could from our precomputed cl_on_cl
                * obtain that A is absorbed in B, B is absorbed in C.
                * below we see that A will be merged with B,
                * and the result will then be merged with C.
                * This depends on the fact that cl_adj is ordered
                * on increasing cluster size.
               */
            ;  mcldMerge(cl_adj->cols+d, clthat, clthat)
            ;  mclvResize(cl_adj->cols+d, 0)
            ;  mclvInsertIdx(cid_affected, clthat->vid, 2.0)
         ;  }
            break
      ;  }
         mclvSort(cllist, mclpIdxCmp)
   ;  }

      mclxFree(&cl_on_cl)
   ;  mclxFree(&el_on_cl)
   ;  mclxFree(&el_to_cl)

   ;  mclxMakeCharacteristic(cl)
   ;  mclvUnary(cid_affected, fltxGT, &bar_affected)
   ;  *n_source = cid_affected->n_ivps
   ;  mclvFree(&cid_affected)

   ;  mclxColumnsRealign(cl_adj, mclvSizeRevCmp)

   ;  if (!n_adjusted)
      {  mclxFree(&cl_adj)
      ;  return 0
   ;  }

      mclxUnary(cl_adj, fltxCopy, NULL)
   ;  mclxMakeCharacteristic(cl_adj)
   ;  *cl_adjustedpp = cl_adj
   ;  return n_adjusted
;  }
/* clmAdjust worker.  For each node in a sufficiently small cluster, the
 * coverage of its own cluster is compared against the best alien cluster
 * among those the node projects onto; if the alien cluster scores clearly
 * better (thresholds tunable through MCL_ADJ_FMAX and MCL_ADJ_EMASS) the
 * node is moved.  Returns the number of nodes moved; affected cluster and
 * node ids are passed back through the three output parameters.
*/

static dim clm_clm_adjust
(  mclx* mx
,  mclx* cl
,  dim cls_size_max
,  mclx** cl_adjustedpp
,  mclv** cid_affectedpp
,  mclv** nid_affectedpp
)
   {  dim i, j, n_adjusted = 0
   ;  mclx* cl_adj = mclxCopy(cl)
   ;  mclv* cid_affected = mclvClone(cl->dom_cols)
   ;  mclv* nid_affected = mclvClone(mx->dom_cols)
   ;  double bar_affected = 1.5
   ;  const char* e1 = getenv("MCL_ADJ_FMAX")
   ;  const char* e2 = getenv("MCL_ADJ_EMASS")
   ;  double f1 = e1 ? atof(e1) : 2
   ;  double f2 = e2 ? atof(e2) : 3
   ;  mcxbool loggit = mcxLogGet( MCX_LOG_CELL | MCX_LOG_INFO )
   ;  clmVScore sc

   ;  mclx *el_to_cl = NULL
   ;  mclx *el_on_cl = NULL
   ;  mclx *cl_on_cl = NULL
   ;  mclx *cl_on_el = NULL

   ;  *cl_adjustedpp  = NULL
   ;  *cid_affectedpp = NULL
   ;  *nid_affectedpp = NULL

   ;  clmCastActors
      (&mx, &cl, &el_to_cl, &el_on_cl, &cl_on_cl, &cl_on_el, 0.95)
   ;  mclxFree(&cl_on_cl)
   ;  mclxFree(&cl_on_el)

   ;  mclvMakeConstant(cid_affected, 1.0)
   ;  mclvMakeConstant(nid_affected, 1.0)

   ;  for (i=0;i<N_COLS(cl_adj);i++)
      cl_adj->cols[i].val = 0.5

               /* Proceed with smallest clusters first.
                * Caller has to take care of mclxColumnsRealign
               */
   ;  for (i=0;i<N_COLS(cl);i++)
      {  mclv* clself = cl->cols+i

               /* Only consider nodes in clusters of
                * size <= cls_size_max
               */
      ;  if (cls_size_max && clself->n_ivps > cls_size_max)
         break

               /* Clusters that have been marked for inclusion
                * cannot play.
               */
      ;  if (cl_adj->cols[i].val > 1)
         continue

      ;  for (j=0;j<clself->n_ivps;j++)
         {  long nid = clself->ivps[j].idx
         ;  long nos = mclvGetIvpOffset(mx->dom_cols, nid, -1)
         ;  mclv* clidvec = mclxGetVector(el_on_cl, nid, RETURN_ON_FAIL, NULL)
         ;  double eff_alien_bsf = 0.0, eff_alien_max_bsf = 0.0  /* best so far */
         ;  double eff_self = 0.0, eff_self_max = 0.0
         ;  long cid_alien = -1, cid_self = -1
         ;  clmVScore sc_self = { 0 }, sc_alien = { 0 }
         ;  dim f

         ;  if (nos < 0 || !clidvec)
            {  mcxErr
               ("clmDumpNodeScores panic", "node <%ld> does not belong", nid)
            ;  continue
         ;  }

            clmVScanDomain(mx->cols+nos, clself, &sc)
         ;  clmVScoreCoverage(&sc, &eff_self, &eff_self_max)
         ;  cid_self = clself->vid
         ;  sc_self = sc

         ;  if (loggit)
            mcxLog2
            (  us
            ,  "node %ld in cluster %ld eff %.3f,%.3f sum %.3f"
            ,  nid
            ,  cid_self
            ,  eff_self
            ,  eff_self_max
            ,  sc.sum_i
            )

         ;  for (f=0;f<clidvec->n_ivps;f++)
            {  long cid = clidvec->ivps[f].idx
            ;  mclv* clvec = mclxGetVector(cl, cid, RETURN_ON_FAIL, NULL)
                                 /* ^ overdoing: cid == clvec->vid */
            ;  double eff, eff_max

            ;  if (!clvec)
               {  mcxErr
                  (  "clmAdjust panic"
                  ,  "cluster <%ld> node <%ld> mishap"
                  ,  cid
                  ,  nid
                  )
               ;  continue
            ;  }

               /* fixme: document or remove first condition
                *
               */
               if ((0 && clvec->n_ivps <= clself->n_ivps) || clvec->vid == cid_self)
               continue

            ;  clmVScanDomain(mx->cols+nos, clvec, &sc)
            ;  clmVScoreCoverage(&sc, &eff, &eff_max)

#if 0
#  define PIVOT eff > eff_alien_bsf
#else
#  define PIVOT eff_max > eff_alien_max_bsf
#endif

            ;  if
               (  PIVOT
               || sc.sum_i >= 0.5
               )
                  eff_alien_bsf = eff
               ,  eff_alien_max_bsf = eff_max
               ,  cid_alien = clvec->vid
               ,  sc_alien = sc

            ;  if (sc.sum_i >= 0.5)
               break
         ;  }

            if (loggit)
            mcxLog2
            (  us
            ,  " -> best alien %ld eff %.3f,%.3f sum %.3f"
            ,  cid_alien
            ,  eff_alien_bsf
            ,  eff_alien_max_bsf
            ,  sc_alien.sum_i
            )

               /* below: use sum_i as mass fraction
                * (clmAdjust framework uses stochastic
                * matrix)
               */
         ;  if
            (  cid_alien >= 0
            && cid_self >= 0
            && f1 * sc_alien.max_i >= sc_self.max_i
            && (  (  eff_alien_bsf > eff_self
                  && sc_alien.sum_i > sc_self.sum_i
                  )
               || (  pow(sc_alien.sum_i, f2) >= sc_self.sum_i
                  && pow(eff_self, f2) <= eff_alien_bsf
                  )
               )
               /* So, if max is reasonable
                * and efficiency is better and mass is better
                * or if mass is ridiculously better -> move
                * Somewhat intricate and contrived, yes.
               */
            )
            {  mclv* acceptor
               =  mclxGetVector(cl_adj, cid_alien, RETURN_ON_FAIL, NULL)
            ;  mclv* donor
               =  mclxGetVector(cl_adj, cid_self, RETURN_ON_FAIL, NULL)

            ;  if (!donor || !acceptor || acceptor == donor)
               continue

            ;  mclvInsertIdx(donor, nid, 0.0)
            ;  mclvInsertIdx(acceptor, nid, 1.0)
            ;  acceptor->val = 1.5

            ;  if (mcxLogGet(MCX_LOG_LIST))
               {  mclv* nb = mx->cols+nos
               ;  double mxv = mclvMaxValue(nb)
               ;  double avg = nb->n_ivps ? mclvSum(nb) / nb->n_ivps : -1.0
               ;  mcxLog
                  (  MCX_LOG_LIST
                  ,  us
                  ,  "mov %ld (%ld %.2f %.2f)"
                     " %ld (cv=%.2f cm=%.2f s=%.2f m=%.2f #=%lu)"
                     " to %ld (cv=%.2f cm=%.2f s=%.2f m=%.2f #=%lu)"
                  ,  nid
                  ,  (long) nb->n_ivps, mxv, avg
                  ,  cid_self
                  ,  eff_self, eff_self_max, sc_self.sum_i, sc_self.max_i
                  ,  (ulong) (sc_self.n_meet + sc_self.n_ddif)
                  ,  cid_alien
                  ,  eff_alien_bsf, eff_alien_max_bsf, sc_alien.sum_i, sc_alien.max_i
                  ,  (ulong) (sc_alien.n_meet + sc_alien.n_ddif)
                  )
            ;  }
               n_adjusted++
            ;  mclvInsertIdx(cid_affected, cid_alien, 2.0)
            ;  mclvInsertIdx(cid_affected, cid_self, 2.0)
            ;  mclvInsertIdx(nid_affected, nid, 2.0)
         ;  }
         }
      }

      mclxFree(&el_on_cl)
   ;  mclxFree(&el_to_cl)

   ;  for (i=0;i<N_COLS(cl_adj);i++)
      cl_adj->cols[i].val = 0.0

   ;  mclxMakeCharacteristic(cl)

   ;  if (!n_adjusted)
      {  mclxFree(&cl_adj)
      ;  mclvFree(&cid_affected)
      ;  mclvFree(&nid_affected)
      ;  return 0
   ;  }

      mclxUnary(cl_adj, fltxCopy, NULL)
   ;  mclxMakeCharacteristic(cl_adj)
               /* FIRST REMOVE ENTRIES set to zero (sssst now .. */
               /* ...) and THEN make it characteristic again     */

   ;  mclvUnary(cid_affected, fltxGT, &bar_affected)
   ;  mclvUnary(nid_affected, fltxGT, &bar_affected)

   ;  *cl_adjustedpp  = cl_adj
   ;  *cid_affectedpp = cid_affected
   ;  *nid_affectedpp = nid_affected
   ;  return n_adjusted
;  }