/* Entry point for the log-manager thread: register the thread with the
 * runtime, run the (blocking) store log manager loop, and deregister on
 * return. */
void
mvc_logmanager(void)
{
	Thread self = THRnew("logmanager");

	store_manager();
	THRdel(self);
}
/* Entry point for the min/max-manager thread: register the thread with
 * the runtime, run the (blocking) minmax manager loop, and deregister on
 * return. */
void
mvc_minmaxmanager(void)
{
	Thread self = THRnew("minmaxmanager");

	minmax_manager();
	THRdel(self);
}
/*
 * When a client needs to be terminated then the file descriptors for
 * its input/output are simply closed. This leads to a graceful
 * degradation, but may take some time when the client is busy. A more
 * forcefull method is to kill the client thread, but this may leave
 * locks and semaphores in an undesirable state.
 *
 * The routine freeClient ends a single client session, but through side
 * effects of sharing IO descriptors, also its children. Conversely, a
 * child can not close a parent.
 */
/* Tear down a single client session: close its IO, release all
 * per-session allocations, detach its thread, and mark the slot
 * reusable (or blocked when a shutdown is in progress).
 * NOTE(review): a second, differing definition of freeClient appears
 * later in this file — confirm which variant is the live one. */
void
freeClient(Client c)
{
	Thread t = c->mythread;

	/* signal any observers that this session is winding down */
	c->mode = FINISHCLIENT;
#ifdef MAL_CLIENT_DEBUG
	printf("# Free client %d\n", c->idx);
#endif
	/* close the client's IO channels and per-client runtime state */
	MCexitClient(c);

	/* scope list and curprg can not be removed, because the client may
	 * reside in a quit() command. Therefore the scopelist is re-used.
	 */
	c->scenario = NULL;
	if (c->prompt)
		GDKfree(c->prompt);
	c->prompt = NULL;
	c->promptlength = -1;
	if (c->errbuf) {
		/* detach the thread-local error buffer before freeing it */
		GDKsetbuf(0);
		/* a child shares its parent's errbuf, so only the parentless
		 * (root) session owns — and frees — the buffer */
		if (c->father == NULL)
			GDKfree(c->errbuf);
		c->errbuf = 0;
	}
	c->father = 0;
	c->login = c->lastcmd = 0;
	//c->active = 0;
	c->qtimeout = 0;
	c->stimeout = 0;
	c->user = oid_nil;
	if( c->username){
		GDKfree(c->username);
		c->username = 0;
	}
	c->mythread = 0;
	/* GDKfree tolerates the glb == NULL case here */
	GDKfree(c->glb);
	c->glb = NULL;
	if( c->error_row){
		/* the four error BATs are created together, so the error_row
		 * check covers all of them; drop one logical reference each */
		BBPdecref(c->error_row->batCacheid,TRUE);
		BBPdecref(c->error_fld->batCacheid,TRUE);
		BBPdecref(c->error_msg->batCacheid,TRUE);
		BBPdecref(c->error_input->batCacheid,TRUE);
		c->error_row = c->error_fld = c->error_msg = c->error_input = NULL;
	}
	if (t)
		THRdel(t);	/* you may perform suicide */
	MT_sema_destroy(&c->s);
	/* release the slot for reuse, unless the server is shutting down,
	 * in which case block it from being handed out again */
	c->mode = MCshutdowninprogress()? BLOCKCLIENT: FREECLIENT;
}
/*
 * When a client needs to be terminated then the file descriptors for
 * its input/output are simply closed. This leads to a graceful
 * degradation, but may take some time when the client is busy. A more
 * forcefull method is to kill the client thread, but this may leave
 * locks and semaphores in an undesirable state.
 *
 * The routine freeClient ends a single client session, but through side
 * effects of sharing IO descriptors, also its children. Conversely, a
 * child can not close a parent.
 */
/* Tear down a single client session: close its IO, release its
 * per-session allocations, free the slot, and detach its thread.
 * NOTE(review): this duplicates an earlier freeClient definition with
 * different teardown steps (FINISHING vs FINISHCLIENT, rcc vs error
 * BATs) — confirm which variant belongs in this file. */
void
freeClient(Client c)
{
	Thread t = c->mythread;

	/* signal any observers that this session is winding down */
	c->mode = FINISHING;
#ifdef MAL_CLIENT_DEBUG
	printf("# Free client %d\n", c->idx);
#endif
	/* close the client's IO channels and per-client runtime state */
	MCexitClient(c);

	/* scope list and curprg can not be removed, because the client may
	 * reside in a quit() command. Therefore the scopelist is re-used.
	 */
	c->scenario = NULL;
	if (c->prompt)
		GDKfree(c->prompt);
	c->prompt = NULL;
	c->promptlength = -1;
	if (c->errbuf) {
		/* detach the thread-local error buffer before freeing it */
		GDKsetbuf(0);
		/* a child shares its parent's errbuf, so only the parentless
		 * (root) session owns — and frees — the buffer */
		if (c->father == NULL)
			GDKfree(c->errbuf);
		c->errbuf = 0;
	}
	c->father = 0;
	c->login = c->lastcmd = 0;
	c->qtimeout = 0;
	c->stimeout = 0;
	if (c->rcc) {
		GDKfree(c->rcc);
		c->rcc = NULL;
	}
	c->user = oid_nil;
	c->mythread = 0;
	/* slot is reusable from this point on */
	c->mode = FREECLIENT;
	/* GDKfree tolerates the glb == NULL case here */
	GDKfree(c->glb);
	c->glb = NULL;
	if (t)
		THRdel(t);	/* you may perform suicide */
}
/* Body of a dataflow worker thread.
 *
 * T points at this worker's slot in the global workers[] array. The
 * worker repeatedly dequeues flow events (single MAL instructions) from
 * the shared `todo` queue, executes them via runMALsequence, and then
 * tries to claim one instruction unblocked by the result so it can run
 * it immediately without going through the queue again.
 *
 * Shared state protected by dataflowLock: t->cntxt and t->flag.
 * Shared state protected by flow->flowlock: per-instruction status[].
 * The worker exits when q_dequeue returns NULL with no client attached,
 * or when the global `exiting` flag is raised. */
static void
DFLOWworker(void *T)
{
	struct worker *t = (struct worker *) T;
	DataFlow flow;
	FlowEvent fe = 0, fnxt = 0;	/* fnxt: instruction claimed for immediate execution */
	int id = (int) (t - workers);	/* worker index, for tracing only */
	Thread thr;
	str error = 0;
	int i,last;
	Client cntxt;
	InstrPtr p;

	thr = THRnew("DFLOWworker");
	GDKsetbuf(GDKmalloc(GDKMAXERRLEN));	/* where to leave errors */
	GDKerrbuf[0] = 0;
	/* snapshot the client this worker is (initially) bound to */
	MT_lock_set(&dataflowLock, "DFLOWworker");
	cntxt = t->cntxt;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
	if (cntxt) {
		/* wait until we are allowed to start working */
		MT_sema_down(&t->s, "DFLOWworker");
	}
	while (1) {
		if (fnxt == 0) {
			/* nothing claimed from the previous round: re-read our
			 * client binding and pull work from the shared queue */
			MT_lock_set(&dataflowLock, "DFLOWworker");
			cntxt = t->cntxt;
			MT_lock_unset(&dataflowLock, "DFLOWworker");
			fe = q_dequeue(todo, cntxt);
			if (fe == NULL) {
				if (cntxt) {
					/* we're not done yet with work for the current
					 * client (as far as we know), so give up the CPU
					 * and let the scheduler enter some more work, but
					 * first compensate for the down we did in
					 * dequeue */
					MT_sema_up(&todo->s, "DFLOWworker");
					MT_sleep_ms(1);
					continue;
				}
				/* no more work to be done: exit */
				break;
			}
		} else
			fe = fnxt;
		if (ATOMIC_GET(exiting, exitingLock, "DFLOWworker")) {
			break;
		}
		fnxt = 0;
		assert(fe);
		flow = fe->flow;
		assert(flow);

		/* whenever we have a (concurrent) error, skip it */
		if (flow->error) {
			q_enqueue(flow->done, fe);
			continue;
		}

		/* skip all instructions when we have encontered an error */
		if (flow->error == 0) {
#ifdef USE_MAL_ADMISSION
			/* admission control: if the memory claim is refused, put
			 * the event back on the queue and try again later */
			if (MALadmission(fe->argclaim, fe->hotclaim)) {
				fe->hotclaim = 0;	/* don't assume priority anymore */
				if (todo->last == 0)
					MT_sleep_ms(DELAYUNIT);
				q_requeue(todo, fe);
				continue;
			}
#endif
			/* execute exactly one MAL instruction: [pc, pc+1) */
			error = runMALsequence(flow->cntxt, flow->mb, fe->pc, fe->pc + 1, flow->stk, 0, 0);
			PARDEBUG fprintf(stderr, "#executed pc= %d wrk= %d claim= " LLFMT "," LLFMT " %s\n", fe->pc, id, fe->argclaim, fe->hotclaim, error ? error : "");
#ifdef USE_MAL_ADMISSION
			/* release the memory claim */
			MALadmission(-fe->argclaim, -fe->hotclaim);
#endif
			/* update the numa information.
			   keep the thread-id producing the value */
			p= getInstrPtr(flow->mb,fe->pc);
			for( i = 0; i < p->argc; i++)
				flow->mb->var[getArg(p,i)]->worker = thr->tid;

			MT_lock_set(&flow->flowlock, "DFLOWworker");
			fe->state = DFLOWwrapup;
			MT_lock_unset(&flow->flowlock, "DFLOWworker");
			if (error) {
				MT_lock_set(&flow->flowlock, "DFLOWworker");
				/* only collect one error (from one thread, needed for stable testing) */
				if (!flow->error)
					flow->error = error;
				MT_lock_unset(&flow->flowlock, "DFLOWworker");
				/* after an error we skip the rest of the block */
				q_enqueue(flow->done, fe);
				continue;
			}
		}

		/* see if you can find an eligible instruction that uses the
		 * result just produced. Then we can continue with it right away.
		 * We are just looking forward for the last block, which means we
		 * are safe from concurrent actions. No other thread can steal it,
		 * because we hold the logical lock.
		 * All eligible instructions are queued
		 */
#ifdef USE_MAL_ADMISSION
		{
			/* sum the memory claims of the results we just produced */
			InstrPtr p = getInstrPtr(flow->mb, fe->pc);
			assert(p);
			fe->hotclaim = 0;
			for (i = 0; i < p->retc; i++)
				fe->hotclaim += getMemoryClaim(flow->mb, flow->stk, p, i, FALSE);
		}
#endif
		/* walk the dependency edges of the finished instruction; the
		 * first successor whose last remaining block is this one becomes
		 * fnxt and is run directly by this worker next iteration */
		MT_lock_set(&flow->flowlock, "DFLOWworker");
		for (last = fe->pc - flow->start; last >= 0 && (i = flow->nodes[last]) > 0; last = flow->edges[last])
			if (flow->status[i].state == DFLOWpending && flow->status[i].blocks == 1) {
				flow->status[i].state = DFLOWrunning;
				flow->status[i].blocks = 0;
				flow->status[i].hotclaim = fe->hotclaim;
				flow->status[i].argclaim += fe->hotclaim;
				fnxt = flow->status + i;
				break;
			}
		MT_lock_unset(&flow->flowlock, "DFLOWworker");

		q_enqueue(flow->done, fe);
		if ( fnxt == 0) {
			/* nothing claimed: report idleness to the profiler when the
			 * shared queue is empty as well */
			int last;

			MT_lock_set(&todo->l, "DFLOWworker");
			last = todo->last;
			MT_lock_unset(&todo->l, "DFLOWworker");
			if (last == 0)
				profilerHeartbeatEvent("wait", 0);
		}
	}
	/* release the per-thread error buffer and deregister the thread */
	GDKfree(GDKerrbuf);
	GDKsetbuf(0);
	THRdel(thr);
	MT_lock_set(&dataflowLock, "DFLOWworker");
	t->flag = EXITED;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
}