Example No. 1
  virtual void resourceOffers(SchedulerDriver* driver,
                              const vector<Offer>& offers)
  {
    for (size_t i = 0; i < offers.size(); i++) {
      const Offer& offer = offers[i];
      Resources remaining = offer.resources();

      static Resources TASK_RESOURCES = Resources::parse(
          "cpus:" + stringify<float>(CPUS_PER_TASK) +
          ";mem:" + stringify<size_t>(MEM_PER_TASK)).get();

      // Count how many tasks of this size still fit into this offer.
      size_t maxTasks = 0;
      while (remaining.flatten().contains(TASK_RESOURCES)) {
        maxTasks++;
        remaining -= TASK_RESOURCES;
      }

      // Launch tasks: give half of this offer's capacity to crawl tasks and the other half to render tasks.
      vector<TaskInfo> tasks;
      for (size_t t = 0; t < maxTasks / 2 && crawlQueue.size() > 0; t++) {
        string url = crawlQueue.front();
        crawlQueue.pop();
        string urlId = "C" + stringify<size_t>(processed[url]);
        TaskInfo task;
        task.set_name("Crawler " + urlId);
        task.mutable_task_id()->set_value(urlId);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_executor()->MergeFrom(crawler);
        task.mutable_resources()->MergeFrom(TASK_RESOURCES);
        task.set_data(url);
        tasks.push_back(task);
        tasksLaunched++;
        cout << "Crawler " << urlId << " " << url << endl;
      }
      for (size_t t = maxTasks / 2; t < maxTasks && renderQueue.size() > 0; t++) {
        string url = renderQueue.front();
        renderQueue.pop();
        string urlId = "R" + stringify<size_t>(processed[url]);
        TaskInfo task;
        task.set_name("Renderer " + urlId);
        task.mutable_task_id()->set_value(urlId);
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_executor()->MergeFrom(renderer);
        task.mutable_resources()->MergeFrom(TASK_RESOURCES);
        task.set_data(url);
        tasks.push_back(task);
        tasksLaunched++;
        cout << "Renderer " << urlId << " " << url << endl;
      }

      driver->launchTasks(offer.id(), tasks);
    }
  }
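This resourceOffers() callback only runs once its scheduler is attached to a MesosSchedulerDriver. For context, here is a minimal sketch of how such a scheduler might be wired up and run; the class name CrawlRenderScheduler, its default constructor, and the command-line handling are illustrative assumptions, not part of the original example.

#include <iostream>

#include <mesos/scheduler.hpp>

using namespace mesos;

int main(int argc, char** argv)
{
  if (argc != 2) {
    std::cerr << "Usage: " << argv[0] << " <master>" << std::endl;
    return 1;
  }

  // Describe the framework; an empty user lets Mesos fill in the current user.
  FrameworkInfo framework;
  framework.set_user("");
  framework.set_name("Crawl/Render Framework (C++)");

  // CrawlRenderScheduler stands in for the Scheduler subclass that implements
  // the resourceOffers() callback shown above.
  CrawlRenderScheduler scheduler;

  // Connect the scheduler to the master given on the command line
  // (e.g. "127.0.0.1:5050"); run() blocks until the driver stops or aborts.
  MesosSchedulerDriver driver(&scheduler, framework, argv[1]);

  return driver.run() == DRIVER_STOPPED ? 0 : 1;
}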
Example No. 2
  virtual void resourceOffers(SchedulerDriver* driver,
                              const std::vector<Offer>& offers)
  {
    std::cout << "Resource offers received" << std::endl;

    for (size_t i = 0; i < offers.size(); i++) {
      const Offer& offer = offers[i];

      // We just launch one task.
      if (!taskLaunched) {
        double mem = getScalarResource(offer, "mem");
        assert(mem > EXECUTOR_MEMORY_MB);

        std::vector<TaskInfo> tasks;
        std::cout << "Starting the task" << std::endl;

        TaskInfo task;
        task.set_name("Balloon Task");
        task.mutable_task_id()->set_value("1");
        task.mutable_slave_id()->MergeFrom(offer.slave_id());
        task.mutable_executor()->MergeFrom(executor);
        task.set_data(stringify<size_t>(balloonLimit));

        // Use up all the memory from the offer.
        Resource* resource;
        resource = task.add_resources();
        resource->set_name("mem");
        resource->set_type(Value::SCALAR);
        resource->mutable_scalar()->set_value(mem - EXECUTOR_MEMORY_MB);

        // And all the CPU.
        double cpus = getScalarResource(offer, "cpus");
        resource = task.add_resources();
        resource->set_name("cpus");
        resource->set_type(Value::SCALAR);
        resource->mutable_scalar()->set_value(cpus);

        tasks.push_back(task);
        driver->launchTasks(offer.id(), tasks);

        taskLaunched = true;
      }
    }
  }
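Example No. 2 relies on a getScalarResource() helper that is not shown in the snippet. A minimal sketch of what such a helper might look like, assuming it simply sums the scalar values of the matching resources in the offer and returns 0 when none are present:

// Hypothetical helper: return the total scalar value of a named resource
// (e.g. "cpus" or "mem") in an offer, or 0 if the offer carries none.
static double getScalarResource(const Offer& offer, const std::string& name)
{
  double value = 0.0;
  for (int i = 0; i < offer.resources_size(); i++) {
    const Resource& resource = offer.resources(i);
    if (resource.name() == name && resource.type() == Value::SCALAR) {
      value += resource.scalar().value();
    }
  }
  return value;
}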
Example No. 3
void ChapelScheduler::resourceOffers(SchedulerDriver* driver, 
                                     const vector<Offer>& offers) 
{
   // offers only contain resources describing a single node -> for more details read include/mesos/mesos.proto
   // 
   cout << "***\tProcessing Offers!" << endl;

   const int remainingCpusReq = cpusReq - launchedTsks.size();

   if(remainingCpusReq == 0) {

      for(size_t k = 0; k < offers.size(); k++) {
         const Offer& offer = offers[k];
         driver->declineOffer(offer.id());
      }

      cout << "\t\tChapelScheduler declined offer because resource requirements are satisfied" << endl;

      // Everything required is already running; return so the offers are not declined a second time below.
      return;
   }

   // cycle through all the offers and allocate tasks from each one's resources
   // each offer corresponds to a single compute node
   //
   const static Resources TASK_RESOURCES = Resources::parse(mesosReq).get();
   vector<TaskInfo> tsks;

   for(size_t i = 0; i < offers.size(); i++) {
      const Offer& offer = offers[i];

      if(static_cast<int>(tsks.size()) >= remainingCpusReq) {
         driver->declineOffer(offer.id());
         continue; // need to cycle through the remaining offers and decline them
      }

      Resources remaining = offer.resources();

      /* attempting to exercise multi-tenancy capabilities in mesos
       * given an offer from a node, try to maximize the number of jobs
       * that can be allocated to that node given the job's resource
       * requirements
       *
       * if the desired number of nodes and jobs are met, then launch
       * all the jobs on that node's offer
       *
       * this means some nodes will get multiple tasks assigned for
       * execution 
       */

      vector<TaskInfo> tol;

      while(remaining.flatten().contains(TASK_RESOURCES) &&
            static_cast<int>(tsks.size()) < remainingCpusReq) {
         const string tid = stringify<size_t>(tsks.size());

         TaskInfo task;
         task.set_name("Chapel Remote Program Task\t" + tid);

         task.mutable_task_id()->set_value(tid);
         task.mutable_slave_id()->MergeFrom(offer.slave_id());
         task.mutable_command()->MergeFrom(chplCmdInfo);
         task.mutable_resources()->MergeFrom(TASK_RESOURCES);

         task.set_data(remoteCmd);
         tol.push_back(task); // tol means "to launch"
         tsks.push_back(task); // tsks tracks tasks launched for framework termination purposes

         remaining-=TASK_RESOURCES;
         tasksLaunched+=1;

         cout << "\t\t+++\tLaunching # of Tasks!\t" << tol.size() << " of " << tasksLaunched << endl;
      }

      // after all the tasks for this offer have been "resourced"
      // launch the tasks using this offer.id
      //
      driver->launchTasks(offer.id(), tol);
   }

   const size_t pendingTsksSize = tsks.size();
   cout << endl << "\tAcquired # tasks " << pendingTsksSize << " required # of tasks " << cpusReq << " remaining required # tasks " << remainingCpusReq << endl << endl;
   
   if(pendingTsksSize > 0) {
      for(vector<TaskInfo>::iterator i = tsks.begin(); i != tsks.end(); i++) {
         launchedTsks.insert(make_pair(i->task_id().value(), *i));
      }
   }

}
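Example No. 3 builds its per-task requirement from a mesosReq string handed to Resources::parse(). That string is assembled elsewhere in the scheduler, but Mesos resource strings are semicolon-separated name:value pairs, so a standalone sketch of the parsing step could look like the following; the concrete values are illustrative assumptions only.

#include <iostream>
#include <string>

#include <mesos/resources.hpp>

#include <stout/try.hpp>

using namespace mesos;

int main()
{
  // Illustrative values only: one CPU and 512 MB of memory per task.
  const std::string mesosReq = "cpus:1;mem:512";

  // Resources::parse() returns a Try<Resources>; check for errors explicitly
  // instead of calling get() unconditionally.
  Try<Resources> parsed = Resources::parse(mesosReq);
  if (parsed.isError()) {
    std::cerr << "Failed to parse resources: " << parsed.error() << std::endl;
    return 1;
  }

  const Resources TASK_RESOURCES = parsed.get();
  std::cout << "Per-task resources: " << TASK_RESOURCES << std::endl;
  return 0;
}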