/*******************************************************************************
 * MASTER FUNCTION
 ******************************************************************************/
/* Try to hand task t directly to a worker that is waiting for work.
 * Recomputes the task's priority first, then scans the wait flags in
 * worker-id order and dispatches to the first waiting worker found.
 * Returns the worker id on success, or -1 if no worker is waiting. */
static int servePendingReq(masterctx_t *master, lpel_task_t *t) {
  t->sched_info.prior = LpelTaskCalPriority(t);
  for (int wid = 0; wid < num_workers; wid++) {
    if (master->waitworkers[wid] != 1)
      continue;                      /* this worker is not waiting */
    master->waitworkers[wid] = 0;    /* consume the pending request */
    WORKER_DBG("master: send task %d to worker %d\n", t->uid, wid);
    sendTask(wid, t);
    return wid;
  }
  return -1;                         /* nobody was waiting */
}
void MPIWorkerMaster::run() { Profiler::getInstance().setEnabled(true); CriticalDegree degree; //FlightDataReader reader("c:\\basic1.txt"); FlightDataReader reader("c:\\basic2.txt"); //FlightDataReader reader("c:\\big1.txt"); //FlightDataReader reader("c:\\big2.txt"); echo(MakeString() << "Load data... (" << reader.getFileName() << ")"); reader.open(); reader.readHeader(); Profiler::getInstance().start("1. Read flights data"); // 1. Read flights data std::vector<Flight> flights = reader.readFlights(); Profiler::getInstance().finish(); ProjectInfo projectInfo = reader.getProjectInfo(); echo(MakeString() << "Project info: " << projectInfo.dump().str()); //Init available slaves (if any) with project info (mSlaveQueue) initSlaves(projectInfo); // 2. Build flight paths // OpenMP splitted by number of flights () buildFlightsPathsParallel(projectInfo, flights); // Calculate total number of project spaces to be processed int numOfTasks = calcNumberOfTasks(projectInfo); echo(MakeString() << "Max number of threads: " << omp_get_max_threads()); echo(MakeString() << "Total number of tasks: " << numOfTasks); echo("Processing..."); int progress = 1; Profiler::getInstance().start("Process project spaces"); ProjectSpaceBuilder builder(projectInfo, flights); // 3. Calculate Critical Degree while(builder.nextTime()) { ProjectSpace projectSpace = builder.build(); // LB - Have free workers send more tasks if (mSlaveQueue.size() > 0) { sendTask(projectSpace); } else { CriticalLevel level = executeTask(projectSpace); degree.addCriticalLevel(level); } collectSlaveResults(degree); if (progress % 10000 == 0) { echo (MakeString() << " Progress: " << progress); } progress++; } Profiler::getInstance().finish(); echo("Collect results from still running slaves."); while(mSlaveRunningTasks > 0) { collectSlaveResults(degree); } sendSlavesFinishSignal(); printResult(degree); }
/* Master scheduler loop: receives messages from workers via the master
 * mailbox and reacts until termination is requested AND the ready queue
 * has been fully drained.
 *
 * Message handling (per WORKER_MSG_* type):
 *  - ASSIGN:    a newly created task arrives; dispatch it to a waiting
 *               worker or queue it with highest priority.
 *  - RETURN:    a worker hands a task back (blocked / yielded / zombie).
 *  - WAKEUP:    a blocked task becomes runnable again.
 *  - REQUEST:   an idle worker asks for work.
 *  - TERMINATE: set the terminate flag (loop exits once queue is empty).
 */
static void MasterLoop(masterctx_t *master)
{
  WORKER_DBG("start master\n");
  do {
    /* blocking receive of the next worker message */
    workermsg_t msg;
    LpelMailboxRecv(mastermb, &msg);

    lpel_task_t *t;
    int wid;
    switch(msg.type) {
    case WORKER_MSG_ASSIGN:
      /* master receive a new task */
      t = msg.body.task;
      assert (t->state == TASK_CREATED);
      t->state = TASK_READY;
      WORKER_DBG("master: get task %d\n", t->uid);
      if (servePendingReq(master, t) < 0) { // no pending request
        t->sched_info.prior = DBL_MAX; //created task does not set up input/output stream yet, set as highest priority
        t->state = TASK_INQUEUE;
        LpelTaskqueuePush(master->ready_tasks, t);
      }
      break;

    case WORKER_MSG_RETURN:
      /* a worker returns a task; what to do depends on the task state */
      t = msg.body.task;
      WORKER_DBG("master: get returned task %d\n", t->uid);
      switch(t->state) {
      case TASK_BLOCKED:
        if (t->wakenup == 1) { /* task has been waked up */
          t->wakenup = 0;
          t->state = TASK_READY;
          // no break, task will be treated as if it is returned as ready
        } else {
          /* still blocked: park it and refresh neighbour priorities */
          t->state = TASK_RETURNED;
          updatePriorityNeigh(master->ready_tasks, t);
          break;
        }
        /* intentional fall-through from the wakenup branch above */
      case TASK_READY: // task yields
#ifdef _USE_NEG_DEMAND_LIMIT_
        t->sched_info.prior = LpelTaskCalPriority(t);
        if (t->sched_info.prior == LPEL_DBL_MIN) { // if not schedule task if it has too low priority
          t->state = TASK_INQUEUE;
          LpelTaskqueuePush(master->ready_tasks, t);
          break;
        }
#endif
        if (servePendingReq(master, t) < 0) { // no pending request
          updatePriorityNeigh(master->ready_tasks, t);
          t->sched_info.prior = LpelTaskCalPriority(t); //update new prior before add to the queue
          t->state = TASK_INQUEUE;
          LpelTaskqueuePush(master->ready_tasks, t);
        }
        break;
      case TASK_ZOMBIE:
        /* task has finished: update neighbours, then free it */
        updatePriorityNeigh(master->ready_tasks, t);
        LpelTaskDestroy(t);
        break;
      default:
        assert(0);
        break;
      }
      break;

    case WORKER_MSG_WAKEUP:
      t = msg.body.task;
      if (t->state != TASK_RETURNED) { // task has not been returned yet
        t->wakenup = 1; // set task as wakenup so that when returned it will be treated as ready
        break;
      }
      WORKER_DBG("master: unblock task %d\n", t->uid);
      t->state = TASK_READY;
#ifdef _USE_NEG_DEMAND_LIMIT_
      t->sched_info.prior = LpelTaskCalPriority(t);
      if (t->sched_info.prior == LPEL_DBL_MIN) { // if not schedule task if it has too low priority
        t->state = TASK_INQUEUE;
        LpelTaskqueuePush(master->ready_tasks, t);
        break;
      }
#endif
      if (servePendingReq(master, t) < 0) { // no pending request
#ifndef _USE_NEG_DEMAND_LIMIT_
        t->sched_info.prior = LpelTaskCalPriority(t); //update new prior before add to the queue
#endif
        t->state = TASK_INQUEUE;
        LpelTaskqueuePush(master->ready_tasks, t);
      }
      break;

    case WORKER_MSG_REQUEST:
      /* an idle worker asks for a task */
      wid = msg.body.from_worker;
      WORKER_DBG("master: request task from worker %d\n", wid);
      t = LpelTaskqueuePeek(master->ready_tasks);
      if (t == NULL) {
        /* nothing queued: remember the worker so a future task can be
         * dispatched to it immediately (see servePendingReq) */
        master->waitworkers[wid] = 1;
      } else {
#ifdef _USE_NEG_DEMAND_LIMIT_
        if (t->sched_info.prior == LPEL_DBL_MIN) { // if not schedule task if it has too low priority
          master->waitworkers[wid] = 1;
          break;
        }
#endif
        t->state = TASK_READY;
        sendTask(wid, t);
        /* remove the task just sent from the queue */
        t = LpelTaskqueuePop(master->ready_tasks);
      }
      break;

    case WORKER_MSG_TERMINATE:
      master->terminate = 1;
      break;
    default:
      assert(0);
    }
    /* keep looping until terminate was requested and the queue is empty */
  } while (!(master->terminate && LpelTaskqueueSize(master->ready_tasks) == 0));
}