caf::behavior main_phase() {
  // Steady-state phase: a periodic tick drains one buffered frame per tick
  // towards the sink while workers keep refilling the cache.
  send(this, tick_atom::value);
  return {
    [this](const std::vector<uint16_t>& data, uint32_t id) {
      // Chunk from a worker: splice it into the frame it belongs to.
      concat_data(data, id);
    },
    [this](tick_atom) {
      // Re-arm the tick first so the cadence is independent of the work below.
      delayed_send(this, tick_rate_, tick_atom::value);
      if (cache_.empty()) {
        std::cout << "[WARNING] Cache empty..." << std::endl;
        return;
      }
      // Ship the oldest buffered frame and immediately request a replacement.
      send(sink_, image_width_, cache_.front());
      cache_.pop();
      send_job();
    },
    [this](resize_atom, uint32_t w, uint32_t h) {
      resize(w,h);
    },
    [this](limit_atom, normal_atom, uint32_t workers) {
      // Worker limit changed: drop back to the measurement behavior and
      // recompute the per-worker weights for the new pool size.
      become(make_behavior());
      send(this, calc_weights_atom::value, size_t{workers});
    },
    caf::others() >> [this] {
      // Log anything we did not expect instead of silently dropping it.
      std::cout << to_string(current_message()) << std::endl;
    }
  };
}
caf::behavior init_buffer() {
  // Pre-fill phase: request buffer_min_size_ frames, one at a time; each
  // frame arrives as one chunk per worker. The two shared counters track
  // progress across handler invocations.
  auto chunks_left = std::make_shared<int>(workers_.size());
  auto jobs_left = std::make_shared<int>(buffer_min_size_);
  send_job();
  return {
    [this, chunks_left, jobs_left](const std::vector<uint16_t>& data,
                                   uint32_t id) {
      concat_data(data, id);
      if (--*chunks_left != 0)
        return; // current frame is not complete yet
      if (--*jobs_left != 0) {
        // More frames to buffer: reset the chunk counter, order the next one.
        send_job();
        *chunks_left = workers_.size();
      } else {
        // Buffer filled up; switch to the steady-state streaming phase.
        become(main_phase());
      }
    },
    [this](resize_atom, uint32_t w, uint32_t h) {
      resize(w,h);
    },
    [this](limit_atom, normal_atom, uint32_t workers) {
      // Worker limit changed: restart measurement with the new pool size.
      become(make_behavior());
      send(this, calc_weights_atom::value, size_t{workers});
    },
    caf::others() >> [this] {
      // Log anything unexpected instead of silently dropping it.
      std::cout << to_string(current_message()) << std::endl;
    }
  };
}
static void message_queue_hook(int eEvent) { /*NOTE: the queue will hang while this function is processing*/ static int processing = 0; command_handle new_command = NULL; const struct message_info *message = NULL; if (eEvent == RSERVR_QUEUE_MESSAGE) { if (processing) return; processing = 1; while ((message = current_message())) { new_command = NULL; if (RSERVR_IS_REQUEST(message) && !RSERVR_IS_BINARY(message)) new_command = client_response(RSERVR_RESPOND(message), event_complete, RSERVR_TO_REQUEST_SINGLE(message)); else if (RSERVR_IS_INFO(message) && !RSERVR_IS_BINARY(message)) { fprintf(stderr, "%s\n", RSERVR_TO_INFO_MESSAGE(message)); new_command = short_response(RSERVR_RESPOND(message), event_complete); } else if (!RSERVR_IS_RESPONSE(message)) new_command = short_response(RSERVR_RESPOND(message), event_error); if (new_command) { send_command_no_status(new_command); destroy_command(new_command); } remove_current_message(); } processing = 0; } }
caf::behavior make_behavior() override {
  // Measurement phase: benchmark every worker on a full frame, derive
  // relative weights and an FPS estimate, then switch to init_buffer().
  // start_map holds the dispatch timestamp of each outstanding benchmark
  // request; it is shared between the handler lambdas below.
  auto start_map = std::make_shared<
    std::map<caf::actor,
             std::chrono::time_point<std::chrono::high_resolution_clock>>>();
  return {
    [=](init_atom, const std::vector<caf::actor>& all_workers) {
      // Remember the full worker pool and kick off a weight calculation
      // that uses all of it.
      std::cout << "init: " << all_workers.size() << std::endl;
      all_workers_.clear();
      all_workers_.insert(std::begin(all_workers_),
                          std::begin(all_workers),
                          std::end(all_workers));
      send(this, calc_weights_atom::value, all_workers_.size());
    },
    [=](calc_weights_atom, size_t workers_to_use) {
      // Clear all caches
      chunk_cache_.clear();
      image_cache empty;
      std::swap(cache_, empty);
      start_map->clear();
      workers_.clear();
      // Select the first workers_to_use workers, weight initially 0.
      for (size_t i = 0; i < workers_to_use; ++i)
        workers_.emplace(all_workers_[i], 0);
      using hrc = std::chrono::high_resolution_clock;
      auto req = stream_.next(); // TODO: Get a image with much black
      uint32_t req_width = width(req);
      uint32_t req_height = height(req);
      auto req_min_re = min_re(req);
      auto req_max_re = max_re(req);
      auto req_min_im = min_im(req);
      auto req_max_im = max_im(req);
      // Benchmark: every worker renders the *whole* image (offset 0, all
      // rows) so the reply times are directly comparable.
      uint32_t offset = 0;
      uint32_t rows = image_height_;
      for (auto& e : workers_) {
        auto& worker = e.first;
        start_map->emplace(worker, hrc::now());
        send(worker, default_iterations, req_width, req_height, offset, rows,
             req_min_re, req_max_re, req_min_im, req_max_im);
      }
    },
    [=](const std::vector<uint16_t>& data, uint32_t) {
      if (data.size() != image_size_)
        return; // old data TODO: Problem with 1 worker?
      // Record how long this worker took for its benchmark frame.
      auto sender = caf::actor_cast<caf::actor>(current_sender());
      auto t2 = std::chrono::high_resolution_clock::now();
      auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(
                    t2 - (*start_map)[sender]).count();
      workers_[sender] = diff;
      start_map->erase(sender);
      if (start_map->empty()) {
        // All benchmark replies are in: turn absolute times into relative
        // weights (each worker's share of the summed time).
        auto add = [](size_t lhs, const std::pair<caf::actor, double>& rhs) {
          return lhs + rhs.second;
        };
        auto total_time = std::accumulate(workers_.begin(), workers_.end(),
                                          size_t{0}, add);
        for (auto& e : workers_)
          e.second = e.second / total_time;
        // Estimate a frame time by scaling the last reply's duration with
        // the first worker's weight. NOTE(review): only the first map entry
        // is used (loop breaks immediately) — presumably intentional, but
        // worth confirming.
        double time = diff;
        for (auto& e : workers_) {
          time *= e.second;
          break;
        }
        auto ms = static_cast<double>(
          std::chrono::milliseconds(static_cast<uint16_t>(time)).count());
        auto sec = static_cast<double>(std::chrono::milliseconds(1000).count());
        double fps = sec / ms;
        tick_rate_ = std::chrono::milliseconds(/*static_cast<uint16_t>(time) + 50*/1000);
        std::cout << "Assumed FPS: " << fps << std::endl;
        //buffer_min_size_ = fps < 1 ? static_cast<uint32_t>(1.0 / fps)
        //                           : static_cast<uint32_t>(fps);
        //buffer_min_size_ *= seconds_to_buffer_;
        // Fixed the FIXME: the old expression `fps + 1 * seconds_to_buffer_`
        // bound `*` before `+`, so seconds_to_buffer_ never acted as a
        // factor. Per the commented-out code above, the intent is to buffer
        // ceil(fps) frames for each second of buffering.
        buffer_min_size_ = (static_cast<uint32_t>(fps) + 1) * seconds_to_buffer_;
        buffer_max_size_ = buffer_min_size_ * 4;
        become(init_buffer());
      }
    },
    [=](resize_atom, uint32_t w, uint32_t h) {
      resize(w,h);
    },
    [=](limit_atom, normal_atom, uint32_t workers) {
      send(this, calc_weights_atom::value, size_t{workers});
    },
    caf::others() >> [=] {
      // Log anything unexpected instead of silently dropping it.
      std::cout << to_string(current_message()) << std::endl;
    }
  };
}
void nexus::broadcast() {
  // Forward the message currently being processed to every registered
  // listener.
  for (auto& l : listeners_) {
    // we know for sure that l can handle last_dequeued(), so no type
    // checking is needed before forwarding
    send(actor_cast<actor>(l), current_message());
  }
}