void *WorkerThread(void *arg) { workerctx_t *wc = (workerctx_t *)arg; #ifdef HAVE___THREAD workerctx_cur = wc; #else /* HAVE___THREAD */ /* set pointer to worker context as TSD */ pthread_setspecific(workerctx_key, wc); #endif /* HAVE___THREAD */ //FIXME #ifdef USE_MCTX_PCL assert(0 == co_thread_init()); wc->mctx = co_current(); #endif wc->terminate = 0; wc->current_task = NULL; LpelThreadAssign(wc->wid + 1); // 0 is for the master WorkerLoop(wc); #ifdef USE_LOGGING /* cleanup monitoring */ if (wc->mon && MON_CB(worker_destroy)) { MON_CB(worker_destroy)(wc->mon); } #endif #ifdef USE_MCTX_PCL co_thread_cleanup(); #endif return NULL; }
/**
 * Replace a stream opened for reading by another stream
 * Destroys old stream.
 *
 * @param sd    stream descriptor for which the stream must be replaced
 * @param snew  the new stream
 * @pre         snew must not be opened by same or other task
 */
void LpelStreamReplace( lpel_stream_desc_t *sd, lpel_stream_t *snew)
{
  assert( sd->mode == 'r');
  STREAM_DBG("task %d replace stream %d by stream %d, mode %c\n",
      sd->task->uid, sd->stream->uid, snew->uid, sd->mode);

  workerctx_t *wc = sd->task->worker_context;
  lpel_stream_t *s = sd->stream;
  /* the replacement inherits the role (entry/exit) of the old stream */
  snew->type = s->type;

  /* free the old stream: detach both endpoints, then recycle it
   * NOTE(review): assumes the producer descriptor still exists here —
   * confirm the close ordering of producer tasks */
  s->prod_sd->stream = NULL;
  s->prod_sd = NULL;
  s->cons_sd = NULL;
  assert(LpelBufferIsEmpty(&s->buffer));
  LpelWorkerPutStream(wc, s);

  /* assign new stream */
  lpel_stream_desc_t *old_cons = snew->cons_sd;
  if (old_cons != NULL) {
    /* BUGFIX: guard the dereference — per the precondition, snew may not be
     * opened by any task, in which case there is no old consumer to unset */
    old_cons->stream = NULL; // unset the stream pointer of the old consumer
  }
  snew->cons_sd = sd;
  sd->stream = snew;

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_replace)) {
    MON_CB(stream_replace)(sd->mon, snew->uid);
  }
#endif
}
/**
 * Open a stream for reading/writing
 *
 * @param s     pointer to stream
 * @param mode  either 'r' for reading or 'w' for writing
 * @return      a stream descriptor, or NULL if allocation fails
 * @pre         only one task may open it for reading resp. writing
 *              at any given point in time
 */
lpel_stream_desc_t *LpelStreamOpen( lpel_stream_t *s, char mode)
{
  lpel_stream_desc_t *sd;
  lpel_task_t *ct = LpelTaskSelf();

  assert( mode == 'r' || mode == 'w' );

  /* BUGFIX: check the allocation before writing through the pointer
   * (the original dereferenced an unchecked malloc result) */
  sd = malloc( sizeof *sd);
  if (sd == NULL) {
    return NULL;
  }
  sd->task = ct;
  sd->stream = s;
  sd->mode = mode;
  sd->next = NULL;

#ifdef USE_TASK_EVENT_LOGGING
  /* create monitoring object, or NULL if stream
   * is not going to be monitored (depends on ct->mon) */
  if (ct->mon && MON_CB(stream_open)) {
    sd->mon = MON_CB(stream_open)( ct->mon, s->uid, mode);
  } else {
    sd->mon = NULL;
  }
#else
  sd->mon = NULL;
#endif

  /* register the descriptor as the stream's single consumer or producer */
  switch(mode) {
    case 'r': s->cons_sd = sd; break;
    case 'w': s->prod_sd = sd; break;
  }

  return sd;
}
/**
 * Close a stream previously opened for reading/writing
 *
 * The descriptor is recycled into the worker's free list; when destroy_s
 * is set, the (drained) stream itself is detached from both endpoints and
 * recycled as well.
 *
 * @param sd         stream descriptor
 * @param destroy_s  if != 0, destroy the stream as well
 */
void LpelStreamClose( lpel_stream_desc_t *sd, int destroy_s)
{
  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_close)) {
    MON_CB(stream_close)(sd->mon);
  }
#endif

  STREAM_DBG("task %d close one stream, mode %c\n", sd->task->uid, sd->mode);

  /* worker context owning the free lists the resources go back to */
  workerctx_t *wc = sd->task->worker_context;

  if (destroy_s) {
    STREAM_DBG("task %d destroy stream %d, mode %c\n", sd->task->uid,
        sd->stream->uid, sd->mode);
    /* only the consumer side destroys the stream */
    assert(sd->mode == 'r');
    lpel_stream_t *s = sd->stream;
    /* the stream must be fully drained before it can be recycled */
    assert(LpelBufferIsEmpty(&s->buffer));
    /* free the stream structure */
    s->prod_sd->stream = NULL; // unset the stream pointer of producer
                               // NOTE(review): assumes the producer sd still
                               // exists — confirm producer close ordering
    s->prod_sd = NULL;         // unset producer
    s->cons_sd = NULL;         // unset consumer
    LpelWorkerPutStream(wc, s); // put back to worker's free list
    sd->stream = NULL;
  }

  LpelTaskRemoveStream(sd->task, sd, sd->mode);
  sd->task = NULL;       // unset only the pointer to task
  LpelWorkerPutSd(wc, sd); // put back to worker's free list
}
/** * Initialise worker globally * * * @param size size of the worker set, i.e., the total number of workers */ void LpelWorkersInit(int size) { int i, res; assert(0 <= size); num_workers = size; #ifndef HAVE___THREAD /* init key for thread specific data */ pthread_key_create(&workerctx_key, NULL); #endif /* HAVE___THREAD */ /* initialize spmdext module */ res = LpelSpmdInit(num_workers); /* allocate worker context table */ workers = (workerctx_t **) malloc( num_workers * sizeof(workerctx_t*) ); /* allocate worker contexts */ for (i=0; i<num_workers; i++) { workers[i] = (workerctx_t *) malloc( sizeof(workerctx_t) ); } /* prepare data structures */ for( i=0; i<num_workers; i++) { workerctx_t *wc = WORKER_PTR(i); wc->wid = i; wc->num_tasks = 0; wc->terminate = 0; wc->sched = LpelSchedCreate( i); wc->wraptask = NULL; #ifdef USE_LOGGING if (MON_CB(worker_create)) { wc->mon = MON_CB(worker_create)(wc->wid); } else { wc->mon = NULL; } #else wc->mon = NULL; #endif /* mailbox */ wc->mailbox = LpelMailboxCreate(0); /* taskqueue of free tasks */ //LpelTaskqueueInit( &wc->free_tasks); } assert(res==0); }
/**
 * Ask the master for a task to execute.
 *
 * Sends a WORKER_MSG_REQUEST carrying this worker's id to the master
 * mailbox and, with logging enabled, records that the worker starts
 * waiting.
 */
static void requestTask(workerctx_t *wc)
{
  WORKER_DBG("worker %d: request task\n", wc->wid);

  workermsg_t req;
  req.type = WORKER_MSG_REQUEST;
  req.body.from_worker = wc->wid;
  LpelMailboxSend(mastermb, &req);

#ifdef USE_LOGGING
  /* mark the beginning of this worker's idle period */
  if (wc->mon && MON_CB(worker_waitstart)) {
    MON_CB(worker_waitstart)(wc->mon);
  }
#endif
}
/**
 * Blocking, consuming read from a stream
 *
 * If the stream is empty, the task is suspended until
 * a producer writes an item to the stream.
 *
 * @param sd  stream descriptor
 * @return    the next item of the stream
 * @pre       current task is single reader
 */
void *LpelStreamRead( lpel_stream_desc_t *sd)
{
  void *item;
  lpel_task_t *self = sd->task;

  assert( sd->mode == 'r');

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_readprepare)) {
    MON_CB(stream_readprepare)(sd->mon);
  }
#endif

  /* quasi P(n_sem): if the pre-decrement value was 0 there are no items,
   * so the reader must block until the producer's V(n_sem) wakes it */
  if ( atomic_fetch_sub( &sd->stream->n_sem, 1) == 0) {
#ifdef USE_TASK_EVENT_LOGGING
    /* MONITORING CALLBACK */
    if (sd->mon && MON_CB(stream_blockon)) {
      MON_CB(stream_blockon)(sd->mon);
    }
#endif
    /* wait on stream: */
    LpelTaskBlockStream( self);
  }

  /* read the top element */
  item = LpelBufferTop( &sd->stream->buffer);
  assert( item != NULL);
  /* pop off the top element */
  LpelBufferPop( &sd->stream->buffer);

  /* quasi V(e_sem): a negative pre-increment value means the producer is
   * blocked waiting for a free slot */
  if ( atomic_fetch_add( &sd->stream->e_sem, 1) < 0) {
    /* e_sem was -1 */
    lpel_task_t *prod = sd->stream->prod_sd->task;
    /* wakeup producer: make ready */
    LpelTaskUnblock( self, prod);

    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_wakeup)) {
      MON_CB(stream_wakeup)(sd->mon);
    }
#endif
  }

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_readfinish)) {
    MON_CB(stream_readfinish)(sd->mon, item);
  }
#endif

  return item;
}
/**
 * Close a stream previously opened for reading/writing
 *
 * Notifies the monitoring layer (if attached), optionally destroys the
 * underlying stream, and releases the descriptor.
 *
 * @param sd         stream descriptor
 * @param destroy_s  if != 0, destroy the stream as well
 */
void LpelStreamClose( lpel_stream_desc_t *sd, int destroy_s)
{
#ifdef USE_TASK_EVENT_LOGGING
  /* MONITORING CALLBACK */
  if (sd->mon && MON_CB(stream_close)) {
    MON_CB(stream_close)(sd->mon);
  }
#endif

  /* tear down the stream first; the descriptor is freed last because
   * it still holds the pointer to the stream */
  if (destroy_s != 0) {
    LpelStreamDestroy( sd->stream);
  }

  free(sd);
}
/**
 * Main loop of a worker thread.
 *
 * Requests a task from the master, then repeatedly receives mailbox
 * messages: on ASSIGN it adopts the task, context-switches into it, and
 * returns it to the master when the task yields back; on TERMINATE it
 * leaves the loop.
 */
static void WorkerLoop(workerctx_t *wc)
{
  WORKER_DBG("start worker %d\n", wc->wid);

  lpel_task_t *t = NULL;

  requestTask(wc); // ask for the first time

  workermsg_t msg;
  do {
    LpelMailboxRecv(wc->mailbox, &msg);

    switch(msg.type) {
      case WORKER_MSG_ASSIGN:
        t = msg.body.task;
        WORKER_DBG("worker %d: get task %d\n", wc->wid, t->uid);
        assert(t->state == TASK_READY);
        /* adopt the task before running it */
        t->worker_context = wc;
        wc->current_task = t;

#ifdef USE_LOGGING
        if (wc->mon && MON_CB(worker_waitstop)) {
          MON_CB(worker_waitstop)(wc->mon);
        }
        if (t->mon && MON_CB(task_assign)) {
          MON_CB(task_assign)(t->mon, wc->mon);
        }
#endif
        /* run the task in its own machine context */
        mctx_switch(&wc->mctx, &t->mctx);
        //task return here
        assert(t->state != TASK_RUNNING);

        //  if (t->state != TASK_ZOMBIE) {
        /* detach the task and hand it back to the master */
        wc->current_task = NULL;
        t->worker_context = NULL;
        returnTask(t);
        //  } else
        //    LpelTaskDestroy(t); // if task finish, destroy it and not return to master
        break;

      case WORKER_MSG_TERMINATE:
        wc->terminate = 1;
        break;

      default:
        assert(0);
        break;
    }
    // reach here --> message request for task has been sent
  } while (!(wc->terminate) );
}
/**
 * Initialise worker globally
 *
 * Creates the master context (mailbox, ready-task queue, wait table) and
 * one worker context per worker.
 *
 * @param size  size of the worker set, i.e., the total number of workers
 *              including master
 */
void LpelWorkersInit(int size)
{
  int i;
  assert(0 <= size);
  /* one thread of the set is the master; the rest are workers */
  num_workers = size - 1;

  /** create master */
  master = (masterctx_t *) malloc(sizeof(masterctx_t));
  /* NOTE(review): malloc results are not checked here — on OOM the
   * following writes dereference NULL; consider adding checks */
  master->mailbox = LpelMailboxCreate();
  master->ready_tasks = LpelTaskqueueInit ();
  master->num_workers = num_workers;

  /* allocate worker context table */
  workers = (workerctx_t **) malloc(num_workers * sizeof(workerctx_t*) );
  /* allocate waiting table */
  master->waitworkers = (int *) malloc(num_workers * sizeof(int));
  /* allocate worker contexts */
  for (i=0; i<num_workers; i++) {
    workers[i] = (workerctx_t *) malloc(sizeof(workerctx_t) );
    master->waitworkers[i] = 0; // worker i is not waiting for a task
    workers[i]->wid = i;

#ifdef USE_LOGGING
    if (MON_CB(worker_create)) {
      workers[i]->mon = MON_CB(worker_create)(workers[i]->wid);
    } else {
      workers[i]->mon = NULL;
    }
#else
    workers[i]->mon = NULL;
#endif

    /* mailbox */
    workers[i]->mailbox = LpelMailboxCreate();
    /* per-worker free lists used to recycle descriptors and streams */
    workers[i]->free_sd = NULL;
    workers[i]->free_stream = NULL;
  }

  /* local variables used in worker operations */
  initLocalVar(num_workers);
}
/**
 * Replace a stream opened for reading by another stream
 * Destroys old stream.
 *
 * @param sd    stream descriptor for which the stream must be replaced
 * @param snew  the new stream
 * @pre         snew must not be opened by same or other task
 */
void LpelStreamReplace( lpel_stream_desc_t *sd, lpel_stream_t *snew)
{
  assert( sd->mode == 'r');

  /* the old stream is discarded for good */
  LpelStreamDestroy( sd->stream);

  /* wire the descriptor and the replacement stream to each other */
  snew->cons_sd = sd;
  sd->stream = snew;

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_replace)) {
    MON_CB(stream_replace)(sd->mon, snew->uid);
  }
#endif
}
/**
 * Block on the worker's mailbox until a message arrives, then process it.
 *
 * The monitoring callbacks bracket the blocking receive so that the
 * worker's idle time can be recorded.
 */
static void WaitForNewMessage( workerctx_t *wc)
{
  workermsg_t incoming;

#ifdef USE_LOGGING
  /* idle period begins */
  if (wc->mon && MON_CB(worker_waitstart)) {
    MON_CB(worker_waitstart)(wc->mon);
  }
#endif

  LpelMailboxRecv(wc->mailbox, &incoming);

#ifdef USE_LOGGING
  /* idle period ends */
  if (wc->mon && MON_CB(worker_waitstop)) {
    MON_CB(worker_waitstop)(wc->mon);
  }
#endif

  ProcessMessage( wc, &incoming);
}
/**
 * Open a stream for reading/writing
 *
 * Reuses a descriptor from the worker's free list when available,
 * otherwise allocates a fresh one.
 *
 * @param s     pointer to stream
 * @param mode  either 'r' for reading or 'w' for writing
 * @return      a stream descriptor, or NULL if allocation fails
 * @pre         only one task may open it for reading resp. writing
 *              at any given point in time
 */
lpel_stream_desc_t *LpelStreamOpen( lpel_stream_t *s, char mode)
{
  lpel_stream_desc_t *sd;
  lpel_task_t *ct = LpelTaskSelf();

  assert( mode == 'r' || mode == 'w' );

  sd = LpelWorkerGetSd(ct->worker_context); // try to get from the free list
  if (sd == NULL) {
    /* BUGFIX: check the fallback allocation before writing through it */
    sd = malloc( sizeof *sd);
    if (sd == NULL) {
      return NULL;
    }
  }
  sd->task = ct;
  sd->stream = s;
  sd->mode = mode;
  sd->next = NULL;

#ifdef USE_TASK_EVENT_LOGGING
  /* create monitoring object, or NULL if stream
   * is not going to be monitored (depends on ct->mon) */
  if (ct->mon && MON_CB(stream_open)) {
    sd->mon = MON_CB(stream_open)( ct->mon, s->uid, mode);
  } else {
    sd->mon = NULL;
  }
#else
  sd->mon = NULL;
#endif

  /* register the descriptor as the stream's single consumer or producer */
  switch(mode) {
    case 'r': s->cons_sd = sd; break;
    case 'w': s->prod_sd = sd; break;
  }

  /* set entry/exit stream */
  if (LpelTaskIsWrapper(ct))
    s->type = (mode == 'r' ? LPEL_STREAM_EXIT : LPEL_STREAM_ENTRY);

  STREAM_DBG("task %d open stream %d, mode %c\n", ct->uid, s->uid, mode);
  LpelTaskAddStream(ct, sd, mode);

  return sd;
}
/**
 * Main loop of a wrapper thread, which hosts a single task.
 *
 * While a current task exists, it is executed via context switch; otherwise
 * the loop blocks on the wrapper's mailbox for an ASSIGN (first delivery of
 * the task) or a WAKEUP (unblocking of the task).
 */
static void WrapperLoop(workerctx_t *wp)
{
  lpel_task_t *t = NULL;
  workermsg_t msg;

  do {
    t = wp->current_task;
    if (t != NULL) {
      /* execute task */
      mctx_switch(&wp->mctx, &t->mctx);
    } else {
      /* no ready tasks */
      LpelMailboxRecv(wp->mailbox, &msg);
      switch(msg.type) {
        case WORKER_MSG_ASSIGN:
          t = msg.body.task;
          WORKER_DBG("wrapper: get task %d\n", t->uid);
          assert(t->state == TASK_CREATED);
          t->state = TASK_READY;
          wp->current_task = t;

#ifdef USE_LOGGING
          /* lazily create the wrapper's monitoring context on first assign */
          if (t->mon) {
            if (MON_CB(worker_create_wrapper)) {
              wp->mon = MON_CB(worker_create_wrapper)(t->mon);
            } else {
              wp->mon = NULL;
            }
          }
          if (t->mon && MON_CB(task_assign)) {
            MON_CB(task_assign)(t->mon, wp->mon);
          }
#endif
          break;

        case WORKER_MSG_WAKEUP:
          t = msg.body.task;
          WORKER_DBG("wrapper: unblock task %d\n", t->uid);
          assert (t->state == TASK_BLOCKED);
          t->state = TASK_READY;
          wp->current_task = t;

#ifdef USE_LOGGING
          if (t->mon && MON_CB(task_assign)) {
            MON_CB(task_assign)(t->mon, wp->mon);
          }
#endif
          break;

        default:
          assert(0);
          break;
      }
    }
  } while (!wp->terminate);

  LpelTaskDestroy(wp->current_task); /* cleanup task context marked for deletion */
}
/**
 * Blocking write to a stream
 *
 * If the stream is full, the task is suspended until the consumer
 * reads items from the stream, freeing space for more items.
 *
 * @param sd    stream descriptor
 * @param item  data item (a pointer) to write
 * @pre         current task is single writer
 * @pre         item != NULL
 */
void LpelStreamWrite( lpel_stream_desc_t *sd, void *item)
{
  lpel_task_t *self = sd->task;
  int poll_wakeup = 0;

  /* check if opened for writing */
  assert( sd->mode == 'w' );
  assert( item != NULL );

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_writeprepare)) {
    MON_CB(stream_writeprepare)(sd->mon, item);
  }
#endif

  /* quasi P(e_sem): if the pre-decrement value was 0 there is no free
   * slot, so the producer must block until the consumer's V(e_sem) */
  if ( atomic_fetch_sub( &sd->stream->e_sem, 1)== 0) {
    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_blockon)) {
      MON_CB(stream_blockon)(sd->mon);
    }
#endif
    /* wait on stream: */
    LpelTaskBlockStream( self);
  }

  /* writing to the buffer and checking if consumer polls must be atomic */
  PRODLOCK_LOCK( &sd->stream->prod_lock);
  {
    /* there must be space now in buffer */
    assert( LpelBufferIsSpace( &sd->stream->buffer) );
    /* put item into buffer */
    LpelBufferPut( &sd->stream->buffer, item);

    if ( sd->stream->is_poll) {
      /* get consumer's poll token; only the producer that wins the token
       * (exchange returns 1) is responsible for the wakeup */
      poll_wakeup = atomic_exchange( &sd->stream->cons_sd->task->poll_token, 0);
      sd->stream->is_poll = 0;
    }
  }
  PRODLOCK_UNLOCK( &sd->stream->prod_lock);

  /* quasi V(n_sem): a negative pre-increment value means the consumer is
   * blocked waiting for an item */
  if ( atomic_fetch_add( &sd->stream->n_sem, 1) < 0) {
    /* n_sem was -1 */
    lpel_task_t *cons = sd->stream->cons_sd->task;
    /* wakeup consumer: make ready */
    LpelTaskUnblock( self, cons);

    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_wakeup)) {
      MON_CB(stream_wakeup)(sd->mon);
    }
#endif
  } else {
    /* we are the sole producer task waking the polling consumer up */
    if (poll_wakeup) {
      lpel_task_t *cons = sd->stream->cons_sd->task;
      /* record which stream caused the wakeup for the polling consumer */
      cons->wakeup_sd = sd->stream->cons_sd;
      LpelTaskUnblock( self, cons);

      /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
      if (sd->mon && MON_CB(stream_wakeup)) {
        MON_CB(stream_wakeup)(sd->mon);
      }
#endif
    }
  }

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_writefinish)) {
    MON_CB(stream_writefinish)(sd->mon);
  }
#endif
}
/**
 * Dispatch a single message received by a worker or wrapper.
 *
 * @param wc   worker context; wc->wid < 0 denotes a wrapper, which runs
 *             its task directly instead of going through the scheduler
 * @param msg  the received message
 */
static void ProcessMessage( workerctx_t *wc, workermsg_t *msg)
{
  lpel_task_t *t;

  //WORKER_DBGMSG(wc, "worker %d processing msg %d\n", wc->wid, msg->type);

  switch( msg->type) {
    case WORKER_MSG_WAKEUP:
      /* worker has new ready tasks,
       * just wakeup to continue loop */
      t = msg->body.task;
      assert(t->state != TASK_READY);
      t->state = TASK_READY;
      WORKER_DBGMSG(wc, "Received wakeup for %d.\n", t->uid);
      if (wc->wid < 0) {
        /* wrapper: the task becomes the one to run next */
        wc->wraptask = t;
      } else {
        LpelSchedMakeReady( wc->sched, t);
      }
      break;

    case WORKER_MSG_TERMINATE:
      wc->terminate = 1;
      break;

    case WORKER_MSG_ASSIGN:
      t = msg->body.task;
      assert(t->state == TASK_CREATED);
      t->state = TASK_READY;
      wc->num_tasks++;
      WORKER_DBGMSG(wc, "Assigned task %d.\n", t->uid);
      if (wc->wid < 0) {
        wc->wraptask = t;
        /* create monitoring context if necessary */
#ifdef USE_LOGGING
        if (t->mon) {
          if (MON_CB(worker_create_wrapper)) {
            wc->mon = MON_CB(worker_create_wrapper)(t->mon);
          } else {
            wc->mon = NULL;
          }
          if ( wc->mon && MON_CB(worker_waitstart)) {
            MON_CB(worker_waitstart)(wc->mon);
          }
        }
#endif
      } else {
        LpelSchedMakeReady( wc->sched, t);
      }
#ifdef USE_LOGGING
      /* assign monitoring context to taskmon */
      if (t->mon && MON_CB(task_assign)) {
        MON_CB(task_assign)(t->mon, wc->mon);
      }
#endif
      break;

    case WORKER_MSG_SPMDREQ:
      assert(wc->wid >= 0);
      /* This message serves the sole purpose to wake up any sleeping workers,
       * as handling of requests is done before execution of a task. */
      /* WORKER_DBGMSG(wc, "Received spmd request notification"
         " from worker %d!\n", msg->body.from_worker); */
      break;

    default:
      assert(0);
  }
}
/**
 * Thread function for workers (and wrappers)
 *
 * Publishes the worker context as thread-local data, pins the thread,
 * then runs WorkerLoop() for real workers (wid >= 0) or WrapperLoop()
 * for wrappers (wid < 0).  On exit it destroys monitoring state, the
 * recycled task list, and — for wrappers only — the mailbox and the
 * context itself (worker contexts are freed elsewhere).
 */
static void *WorkerThread( void *arg)
{
  workerctx_t *wc = (workerctx_t *)arg;
  lpel_task_t *t;

#ifdef HAVE___THREAD
  workerctx_cur = wc;
#else /* HAVE___THREAD */
  /* set pointer to worker context as TSD */
  pthread_setspecific(workerctx_key, wc);
#endif /* HAVE___THREAD */

#ifdef USE_MCTX_PCL
  int res = co_thread_init();
  assert( 0 == res);
  wc->mctx = co_current();
#endif

  wc->current_task = NULL;

  /* no task marked for deletion */
  wc->marked_del = NULL;

  /* assign to cores */
  LpelThreadAssign( wc->wid);

  /*******************************************************/
  if ( wc->wid >= 0) {
    WorkerLoop( wc);
  } else {
    WrapperLoop( wc);
  }
  /*******************************************************/

#ifdef USE_LOGGING
  /* cleanup monitoring */
  if (wc->mon && MON_CB(worker_destroy)) {
    MON_CB(worker_destroy)(wc->mon);
  }
#endif

  /* destroy all the free tasks */
  while ((t = LpelPopTask(free, &wc->free_tasks))) {
    LpelTaskDestroy(t);
  }

  /* on a wrapper, we also can cleanup more*/
  if (wc->wid < 0) {
    /* clean up the mailbox for the worker */
    LpelMailboxDestroy(wc->mailbox);

    /* free the worker context */
    free( wc);
  }

#ifdef USE_MCTX_PCL
  co_thread_cleanup();
#endif

  return NULL;
}