void calc_domainconn(
    vtx_t const nvtxs,
    adj_t const * const xadj,
    vtx_t const * const adjncy,
    vlbl_t const nparts,
    vlbl_t const * const where,
    int * const dc)
{
  vtx_t i, k;
  adj_t j;
  vlbl_t me;
  int * dd;

  dd = int_init_alloc(0, nparts*nparts);

  /* mark which ordered pairs of partitions share at least one cut edge */
  for (i = 0; i < nvtxs; ++i) {
    me = where[i];
    for (j = xadj[i]; j < xadj[i+1]; ++j) {
      k = adjncy[j];
      if (me != where[k]) {
        dd[(me*nparts)+where[k]] = 1;
      }
    }
  }

  /* dc[me] = number of other partitions that partition 'me' touches */
  int_set(dc, 0, nparts);
  for (me = 0; me < nparts; ++me) {
    dc[me] = int_sum(dd+(nparts*me), nparts);
  }

  dl_free(dd);
}
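// -----------------------------------------------------------------------
// Hypothetical driver (not from the original source): one way
// calc_domainconn might be called on a tiny CSR graph. The 4-cycle, the
// 2-way partition in `where`, and the expected result are assumptions made
// for illustration only.
static void calc_domainconn_demo(void)
{
  /* 4-cycle 0-1-2-3-0 in CSR form */
  adj_t const xadj[] = {0, 2, 4, 6, 8};
  vtx_t const adjncy[] = {1, 3, 0, 2, 1, 3, 2, 0};
  vlbl_t const where[] = {0, 0, 1, 1}; /* vertices 0,1 in part 0; 2,3 in part 1 */
  int dc[2];

  calc_domainconn(4, xadj, adjncy, 2, where, dc);
  /* each part borders exactly one other part, so dc[0] == dc[1] == 1 */
}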
#include <stdio.h>

int main(void)
{
  int i;
  int ary[] = {1, 2, 3, 4, 5};

  /* domlib-style argument order is (dst, value, count): zero all 5 slots */
  int_set(ary, 0, 5);
  for (i = 0; i < 5; i++) {
    printf("ary[%d] = %d\n", i, ary[i]);
  }

  return 0;
}
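// -----------------------------------------------------------------------
// Hedged sketch (an assumption, not the library's actual source): judging
// from the call sites above, the domlib-style int_set behaves like a typed
// fill, roughly:
static void int_set_sketch(int * const dst, int const val, size_t const n)
{
  size_t i;
  for (i = 0; i < n; ++i) {
    dst[i] = val;   /* set every element to the same value */
  }
}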
// -----------------------------------------------------------------------
void * timer_thread(void *ptr)
{
  struct timespec ts;
  unsigned clock_tick_nsec = timer_step * 1000000;
  unsigned new_nsec;

  clock_gettime(CLOCK_REALTIME, &ts);
  while (1) {
    /* advance the absolute deadline by one tick, carrying into seconds */
    new_nsec = ts.tv_nsec + clock_tick_nsec;
    ts.tv_sec += new_nsec / 1000000000L;
    ts.tv_nsec = new_nsec % 1000000000L;

    /* sleep until the deadline; returns 0 only if timer_quit was posted */
    if (!sem_timedwait(&timer_quit, &ts)) {
      break;
    }
    if (atom_load_acquire(&timer_enabled)) {
      int_set(atom_load_acquire(&timer_int));
    }
  }
  pthread_exit(NULL);
}
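// -----------------------------------------------------------------------
// Hypothetical start/stop sketch (assumption): sem_timedwait() on
// timer_quit above doubles as both the periodic sleep and the shutdown
// signal, so a controller would plausibly drive the thread like this.
// timer_tid, timer_start, and timer_stop are made-up names.
static pthread_t timer_tid;

static void timer_start(void)
{
  sem_init(&timer_quit, 0, 0);  /* start with no quit signal pending */
  pthread_create(&timer_tid, NULL, timer_thread, NULL);
}

static void timer_stop(void)
{
  sem_post(&timer_quit);        /* wakes sem_timedwait() immediately */
  pthread_join(timer_tid, NULL);
  sem_destroy(&timer_quit);
}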
// -----------------------------------------------------------------------
void cmem_int_report(struct cmem_chan_t *chan)
{
  // if an interrupt is reported and not yet served, there's nothing to do, for now
  pthread_mutex_lock(&CHAN->int_mutex);
  if (CHAN->int_reported) {
    pthread_mutex_unlock(&CHAN->int_mutex);
    return;
  }
  pthread_mutex_unlock(&CHAN->int_mutex);

  // check whether any unit reported an interrupt
  for (int unit_n = 0; unit_n < CMEM_MAX_DEVICES; unit_n++) {
    pthread_mutex_lock(&CHAN->int_mutex);
    if ((CHAN->int_unit[unit_n] != CMEM_INT_NONE) && !CHAN->int_mask) {
      chan->int_reported = unit_n;
      pthread_mutex_unlock(&CHAN->int_mutex);
      LOG(L_CMEM, 20, "CMEM (ch:%i) reporting interrupt %i",
          chan->proto.num, chan->proto.num + 12);
      int_set(chan->proto.num + 12);
      break;
    } else {
      pthread_mutex_unlock(&CHAN->int_mutex);
    }
  }
}
static void serial_int_check()
{
  if (emulate_casplus)
    casplus_int_set(15, serial.interrupts & serial.IER);
  else
    int_set(INT_SERIAL, serial.interrupts & serial.IER);
}
static inline void serial_cx_int_check()
{
  int_set(INT_SERIAL, serial_cx.int_status & serial_cx.int_mask);
}
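// -----------------------------------------------------------------------
// Note: the int_set in the two serial snippets above is a different
// function from the domlib array fill; it takes an interrupt number and a
// truth value. A hedged sketch of that shape (active_ints and the
// re-evaluation step are assumptions, not the emulator's real code):
static uint32_t active_ints; /* hypothetical pending-interrupt bitmask */

static void int_set_emu_sketch(unsigned int_num, int on)
{
  if (on)
    active_ints |= 1u << int_num;    /* assert the interrupt line */
  else
    active_ints &= ~(1u << int_num); /* deassert the interrupt line */
  /* a real controller would re-check mask bits and drive the CPU IRQ here */
}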
void atomic_cycle_distribution(
    vtx_t const nvtxs,
    adj_t const * const xadj,
    vtx_t const * const adjncy,
    adj_t * radj,
    size_t ** const r_cycles,
    vtx_t * const r_maxcycle)
{
  int free_radj;
  vtx_t i, k, v, m, sq, nq, si, ni, curlen, maxlen;
  adj_t j, l;
  vtx_t * q, * qi, * len;
  int * vvisited, * evisited, * ivisited;
  size_t * cycles;

  adj_t const nedges = xadj[nvtxs];

  q = vtx_alloc(nvtxs);
  qi = vtx_alloc(nvtxs);
  vvisited = int_calloc(nvtxs);
  evisited = int_calloc(nedges);
  ivisited = int_calloc(nvtxs);
  /* cycle lengths run up to nvtxs, so the histogram needs one extra slot */
  cycles = size_alloc(dl_min(nvtxs,nedges)+1);
  len = vtx_alloc(nvtxs);

  if (radj) {
    free_radj = 0;
  } else {
    /* build the reverse-edge index so each edge can find its twin */
    radj = adj_alloc(nedges);
    build_adjncy_index(nvtxs,xadj,adjncy,radj);
    free_radj = 1;
  }

  maxlen = 0;
  cycles[0] = 0;

  /* seed the queue */
  sq = nq = 0;
  v = vtx_rand(0,nvtxs);
  q[nq++] = v;
  vvisited[v] = 1;

  /* algorithm from Gashler and Martinez 2012 */
  while (sq < nq) {
    v = q[sq++];
    for (j=xadj[v];j<xadj[v+1];++j) {
      k = adjncy[j];
      if (vvisited[k]) {
        /* BFS from k back to v over already-visited edges to measure the
         * shortest cycle closed by edge (v,k) */
        len[k] = 1;
        si = ni = 0;
        qi[ni++] = k;
        ivisited[k] = 1;
        while (si < ni) {
          i = qi[si++];
          for (l=xadj[i];l<xadj[i+1];++l) {
            m = adjncy[l];
            if (!ivisited[m] && evisited[l]) {
              len[m] = len[i]+1;
              qi[ni++] = m;
              ivisited[m] = 1;
              if (m == v) {
                curlen = len[m];
                /* zero out new cycle lengths */
                while (curlen > maxlen) {
                  cycles[++maxlen] = 0;
                }
                ++cycles[curlen];
                /* I might need to break here */
                si = ni;
                break;
              }
            }
          }
        }
        /* clear ivisited: sweep only the touched entries when few */
        if (ni < nvtxs/64) {
          for (i=0;i<ni;++i) {
            ivisited[qi[i]] = 0;
          }
        } else {
          int_set(ivisited,0,nvtxs);
        }
      } else {
        q[nq++] = k;
        vvisited[k] = 1;
      }
      evisited[j] = 1;
      evisited[radj[j]] = 1;
    }
  }

  /* hack to ignore length 2 cycles */
  cycles[2] = 0;

  if (r_maxcycle) {
    *r_maxcycle = maxlen;
  }
  if (r_cycles) {
    *r_cycles = cycles;
  } else {
    dl_free(cycles);
  }
  if (free_radj) {
    dl_free(radj);
  }

  /* release scratch space */
  dl_free(q);
  dl_free(qi);
  dl_free(vvisited);
  dl_free(evisited);
  dl_free(ivisited);
  dl_free(len);
}
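// -----------------------------------------------------------------------
// Hypothetical driver (assumption, not from the original source): how the
// returned histogram might be consumed. The triangle graph is made up;
// cycles[len] holds the count of cycles of each length, and the function
// itself zeroes out the length-2 entries.
#include <stdio.h>

static void cycle_demo(void)
{
  /* triangle 0-1-2-0 in CSR form */
  adj_t const xadj[] = {0, 2, 4, 6};
  vtx_t const adjncy[] = {1, 2, 0, 2, 0, 1};
  size_t * cycles;
  vtx_t maxcycle;

  atomic_cycle_distribution(3, xadj, adjncy, NULL, &cycles, &maxcycle);
  for (vtx_t l = 3; l <= maxcycle; ++l) {
    printf("cycles of length %u: %zu\n", (unsigned)l, cycles[l]);
  }
  dl_free(cycles);
}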