// Delivers a framework message (originating from an executor) to the
// scheduler's frameworkMessage callback. Note that frameworkId is
// accepted but not forwarded: only the slave id, executor id, and the
// opaque payload are bound into the callback.
void frameworkMessage(const SlaveID& slaveId,
                      const FrameworkID& frameworkId,
                      const ExecutorID& executorId,
                      const string& data)
{
  VLOG(1) << "Received framework message";

  invoke(bind(&Scheduler::frameworkMessage,
              sched,
              driver,
              cref(slaveId),
              cref(executorId),
              cref(data)));
}
// One-loop (2,2) mode-coupling contribution P2_22(k), evaluated as a
// 2-D integral of the kernel F2_22 over the triangular region
// x in [k/sqrt(2), kcut], y in [0, k/sqrt(2)], with absolute/relative
// tolerances scaled by the linear power spectrum P_L(k).
//
// Fix: removed an older, dead variant of this integral (kernel f2_22
// over [QMIN, kcut] with 1e-4 tolerances) that had been left in the
// body under `#if 0`.
real Closure::P2_22(real k) const {
    // The contribution vanishes at k = 0; bail out early rather than
    // setting up a degenerate integration region.
    if (k == 0)
        return 0;

    // Lower and upper corners of the integration rectangle.
    real a[2] = { k/M_SQRT2, 0 };
    real b[2] = { kcut, k/M_SQRT2 };
    return 2*Integrate<2>(bind(&Closure::F2_22, cref(*this), k, _1, _2),
                          a, b, 1e-3, 1e-3*P_L(k)/2);
}
// Handles a task status update: verifies it belongs to this framework,
// forwards it to the scheduler's statusUpdate callback, and then (if a
// sender pid is known) acknowledges it back to the sender.
void statusUpdate(const StatusUpdate& update, const UPID& pid)
{
  const TaskStatus& status = update.status();

  VLOG(1) << "Status update: task " << status.task_id()
          << " of framework " << update.framework_id()
          << " is now in state " << status.state();

  // Updates for other frameworks should never be routed here.
  CHECK(frameworkId == update.framework_id());

  // TODO(benh): Note that this may be a duplicate status update!
  // Once we get support to try and have a more consistent view
  // of what's running in the cluster, we'll just let this one
  // slide. The alternative is possibly dealing with a scheduler
  // failover and not correctly giving the scheduler its status
  // update, which seems worse than giving a status update
  // multiple times (of course, if a scheduler re-uses a TaskID,
  // that could be bad).
  invoke(bind(&Scheduler::statusUpdate, sched, driver, cref(status)));

  if (pid) {
    // Acknowledge the message (we do this last, after we invoked
    // the scheduler, if we did at all, in case it causes a crash,
    // since this way the message might get resent/routed after the
    // scheduler comes back online).
    StatusUpdateAcknowledgementMessage message;
    message.mutable_framework_id()->MergeFrom(frameworkId);
    message.mutable_slave_id()->MergeFrom(update.slave_id());
    message.mutable_task_id()->MergeFrom(status.task_id());
    message.set_uuid(update.uuid());
    send(pid, message);
  }
}
int main() { fusion::vector<int,char> lv_vec(1,'\004'); test_func<> f; test_func<noncopyable> f_nc; fusion::result_of::make_fused_procedure< test_func<> >::type fused_func = fusion::make_fused_procedure(f); CHECK_EFFECT(fused_func(lv_vec), 1); CHECK_EFFECT(const_(fused_func)(lv_vec), 0); CHECK_EFFECT(fusion::make_fused_procedure(const_(f))(lv_vec), 1); CHECK_EFFECT(fusion::make_fused_procedure(ref(f_nc))(lv_vec), 1); CHECK_EFFECT(fusion::make_fused_procedure(cref(f_nc))(lv_vec), 0); CHECK_EFFECT(fused_func(fusion::make_vector(2,'\003')), 1); CHECK_EFFECT(const_(fused_func)(fusion::make_vector(2,'\003')), 0); CHECK_EFFECT(fusion::make_fused_procedure(const_(f))(fusion::make_vector(2,'\003')), 1); CHECK_EFFECT(fusion::make_fused_procedure(ref(f_nc))(fusion::make_vector(2,'\003')), 1); CHECK_EFFECT(fusion::make_fused_procedure(cref(f_nc))(fusion::make_vector(2,'\003')), 0); return boost::report_errors(); }
void resourceOffer(const OfferID& offerId, const vector<SlaveOffer>& offers, const vector<string>& pids) { VLOG(1) << "Received offer " << offerId; // Save the pid associated with each slave (one per SlaveOffer) so // later we can send framework messages directly. CHECK(offers.size() == pids.size()); for (int i = 0; i < offers.size(); i++) { UPID pid(pids[i]); if (pid != UPID()) { VLOG(2) << "Saving PID '" << pids[i] << "'"; savedOffers[offerId][offers[i].slave_id()] = pid; } else { // Parsing of a PID may fail due to DNS! VLOG(2) << "Failed to parse PID '" << pids[i] << "'"; } } invoke(bind(&Scheduler::resourceOffer, sched, driver, cref(offerId), cref(offers))); }
void error(int32_t code, const string& message) { VLOG(1) << "Got error '" << message << "' (code: " << code << ")"; invoke(bind(&Scheduler::error, sched, driver, code, cref(message))); }
// Notifies the scheduler that a slave has been lost, after discarding
// the pid we had saved for it (no more framework messages can be sent
// to that slave directly).
void lostSlave(const SlaveID& slaveId)
{
  VLOG(1) << "Lost slave " << slaveId;

  // Drop the saved pid for this slave, if any.
  savedSlavePids.erase(slaveId);

  invoke(bind(&Scheduler::slaveLost, sched, driver, cref(slaveId)));
}
// Handles an offer rescission: forgets the saved slave pids associated
// with the offer and notifies the scheduler via offerRescinded.
void rescindOffer(const OfferID& offerId)
{
  VLOG(1) << "Rescinded offer " << offerId;

  // The offer is no longer valid; drop its saved slave pids.
  savedOffers.erase(offerId);

  invoke(bind(&Scheduler::offerRescinded, sched, driver, cref(offerId)));
}
// Records the framework id assigned by the master and notifies the
// scheduler via its registered callback.
void registered(const FrameworkID& frameworkId)
{
  VLOG(1) << "Framework registered with " << frameworkId;

  // Remember our id; later messages are checked against it.
  this->frameworkId = frameworkId;

  invoke(bind(&Scheduler::registered, sched, driver, cref(frameworkId)));
}
// Spawns a worker running `thread(t, *this)` and registers it with the
// group; boost::thread_group::add_thread takes ownership of the
// heap-allocated boost::thread.
// NOTE(review): `t` (and `*this`) are captured via reference_wrapper,
// so the referenced objects must outlive the worker thread — confirm
// at the call sites.
void add_thread(const T &t)
{
    threads_.add_thread(
        new boost::thread(thread(boost::cref(t), boost::cref(*this))));
}
// 1-D integral contribution G_i(k): the kernel B_i evaluated against
// the spectrum P_i, integrated over [QMIN, QMAX] with the ExpSub rule,
// scaled by -k^2/(4 pi^2) * 1/36.
real Closure::G_i(real k) const {
    const real prefactor = -k*k/(4*M_PI*M_PI) * 1/36.;
    return prefactor * Integrate<ExpSub>(bind(B_i, cref(P_i), k, _1),
                                         QMIN, QMAX, EPS);
}
// 1-D integral contribution G_h(k): the kernel B_h integrated over
// [QMIN, QMAX] with the ExpSub rule, scaled by +k^2/(4 pi^2) * 1/12.
// NOTE(review): B_h is bound to P_i here — the same spectrum G_i uses;
// confirm that P_i (and not a P_h) is intended.
real Closure::G_h(real k) const {
    const real prefactor = k*k/(4*M_PI*M_PI) * 1/12.;
    return prefactor * Integrate<ExpSub>(bind(B_h, cref(P_i), k, _1),
                                         QMIN, QMAX, EPS);
}