/*
 * Kick off a GRU instruction: publish all previously written instruction
 * fields, then set the CMD bit in the first word of the handle.
 */
static void start_instruction(void *h)
{
	unsigned long *first_word = h;

	wmb();		/* setting CMD bit must be last */
	*first_word |= 1;
	gru_flush_cache(h);
}
/*
 * Kick off a GRU instruction: publish all previously written instruction
 * fields, then set the CMD/STATUS bits in the first word of the handle.
 */
static void start_instruction(void *h)
{
	unsigned long *first_word = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*first_word |= 0x20001;
	gru_flush_cache(h);
}
static void start_instruction(void *h) { unsigned long *w0 = h; wmb(); /* setting CMD/STATUS bits must be last */ *w0 = *w0 | 0x20001; gru_flush_cache(h); }
/*----------------------------------------------------------------------*/ int gru_get_cb_exception_detail(void *cb, struct control_block_extended_exc_detail *excdet) { struct gru_control_block_extended *cbe; struct gru_blade_state *bs; int cbrnum; bs = KCB_TO_BS(cb); cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb)); cbe = get_cbe(GRUBASE(cb), cbrnum); gru_flush_cache(cbe); /* CBE not coherent */ excdet->opc = cbe->opccpy; excdet->exopc = cbe->exopccpy; excdet->ecause = cbe->ecause; excdet->exceptdet0 = cbe->idef1upd; excdet->exceptdet1 = cbe->idef3upd; gru_flush_cache(cbe); return 0; }
/*----------------------------------------------------------------------*/ int gru_get_cb_exception_detail(void *cb, struct control_block_extended_exc_detail *excdet) { struct gru_control_block_extended *cbe; struct gru_thread_state *kgts = NULL; unsigned long off; int cbrnum, bid; /* * Locate kgts for cb. This algorithm is SLOW but * this function is rarely called (ie., almost never). * Performance does not matter. */ for_each_possible_blade(bid) { if (!gru_base[bid]) break; kgts = gru_base[bid]->bs_kgts; if (!kgts || !kgts->ts_gru) continue; off = cb - kgts->ts_gru->gs_gru_base_vaddr; if (off < GRU_SIZE) break; kgts = NULL; } BUG_ON(!kgts); cbrnum = thread_cbr_number(kgts, get_cb_number(cb)); cbe = get_cbe(GRUBASE(cb), cbrnum); gru_flush_cache(cbe); /* CBE not coherent */ sync_core(); excdet->opc = cbe->opccpy; excdet->exopc = cbe->exopccpy; excdet->ecause = cbe->ecause; excdet->exceptdet0 = cbe->idef1upd; excdet->exceptdet1 = cbe->idef3upd; gru_flush_cache(cbe); return 0; }
int gru_get_cb_exception_detail(void *cb, struct control_block_extended_exc_detail *excdet) { struct gru_control_block_extended *cbe; struct gru_thread_state *kgts = NULL; unsigned long off; int cbrnum, bid; /* Locate kgts for cb. This algorithm is SLOW but this function is rarely called (ie., almost never). Performance does not matter. */ for_each_possible_blade(bid) { if (!gru_base[bid]) break; kgts = gru_base[bid]->bs_kgts; if (!kgts || !kgts->ts_gru) continue; off = cb - kgts->ts_gru->gs_gru_base_vaddr; if (off < GRU_SIZE) break; kgts = NULL; } BUG_ON(!kgts); cbrnum = thread_cbr_number(kgts, get_cb_number(cb)); cbe = get_cbe(GRUBASE(cb), cbrnum); gru_flush_cache(cbe); /* CBE not coherent */ sync_core(); excdet->opc = cbe->opccpy; excdet->exopc = cbe->exopccpy; excdet->ecause = cbe->ecause; excdet->exceptdet0 = cbe->idef1upd; excdet->exceptdet1 = cbe->idef3upd; gru_flush_cache(cbe); return 0; }
static int gru_retry_exception(void *cb) { struct gru_control_block_status *gen = (void *)cb; struct control_block_extended_exc_detail excdet; int retry = EXCEPTION_RETRY_LIMIT; while (1) { if (gru_wait_idle_or_exception(gen) == CBS_IDLE) return CBS_IDLE; if (gru_get_cb_message_queue_substatus(cb)) return CBS_EXCEPTION; gru_get_cb_exception_detail(cb, &excdet); if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) || (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC)) break; if (retry-- == 0) break; gen->icmd = 1; gru_flush_cache(gen); } return CBS_EXCEPTION; }