static int request_io(struct knot_requestor *req, struct knot_request *last, struct timeval *timeout) { int ret = KNOT_EOK; knot_pkt_t *query = last->query; knot_pkt_t *resp = last->resp; /* Data to be sent. */ if (req->overlay.state == KNOT_STATE_PRODUCE) { /* Process query and send it out. */ knot_overlay_produce(&req->overlay, query); if (req->overlay.state == KNOT_STATE_CONSUME) { ret = request_send(last, timeout); if (ret != KNOT_EOK) { return ret; } } } /* Data to be read. */ if (req->overlay.state == KNOT_STATE_CONSUME) { /* Read answer and process it. */ ret = request_recv(last, timeout); if (ret < 0) { return ret; } (void) knot_pkt_parse(resp, 0); knot_overlay_consume(&req->overlay, resp); } return KNOT_EOK; }
// Entry point for an MPI map/reduce-style line filter: every rank pipes
// lines of text through an external child process (`command`).  Rank 0 is
// the master: it reads the input file, deals (id, line) work items out to
// the worker ranks (and to its own local subprocess), collects processed
// lines back, and writes the merged output.  All other ranks receive lines
// from rank 0, run them through the subprocess, and send results back.
// Returns 0 on success; on any exception it aborts the MPI job and
// returns 1.
int main(int argc, char** argv)
{
  // RAII wrapper that presumably performs MPI_Init/MPI_Finalize --
  // defined in utils (TODO confirm).
  utils::mpi_world mpi_world(argc, argv);

  const int mpi_rank = MPI::COMM_WORLD.Get_rank();
  const int mpi_size = MPI::COMM_WORLD.Get_size();

  try {
    // Parse command-line options into file-scope globals (command,
    // input_file, output_file, ...); non-zero means bad usage.
    if (getoptions(argc, argv) != 0)
      return 1;

    if (command.empty())
      throw std::runtime_error("no command?");

    typedef MapReduce map_reduce_type;

    typedef map_reduce_type::id_type         id_type;     // id_type(-1) is the EOF sentinel
    typedef map_reduce_type::value_type      value_type;  // (id, line) pair
    typedef map_reduce_type::queue_type      queue_type;
    typedef map_reduce_type::queue_id_type   queue_id_type;
    typedef map_reduce_type::subprocess_type subprocess_type;

    typedef Mapper   mapper_type;
    typedef Reducer  reducer_type;
    typedef Consumer consumer_type;
    typedef Merger   merger_type;

    if (mpi_rank == 0) {
      // ----------------------- master (rank 0) -----------------------
      subprocess_type subprocess(command);

      queue_type    queue_is(mpi_size);  // lines read from the input file
      queue_type    queue_send(1);       // work fed to the local subprocess
      queue_id_type queue_id;
      queue_type    queue_recv;          // processed lines, drained by `merger`

      // Use an unbuffered output stream when writing to stdout or to a
      // non-regular file (pipe, device, ...) so results appear promptly.
      const bool flush_output = (output_file == "-"
                                 || (boost::filesystem::exists(output_file)
                                     && ! boost::filesystem::is_regular_file(output_file)));

      utils::compress_istream is(input_file, 1024 * 1024);
      // Buffer size collapses to 0 when flush_output is set.
      utils::compress_ostream os(output_file, 1024 * 1024 * (! flush_output));

      // Helper threads: consumer reads input into queue_is, merger writes
      // results from queue_recv to the output, mapper/reducer drive the
      // local subprocess.
      boost::thread consumer(consumer_type(queue_is, is));
      boost::thread merger(merger_type(queue_recv, os, mpi_size));
      boost::thread mapper(mapper_type(queue_send, queue_id, subprocess));
      boost::thread reducer(reducer_type(queue_recv, queue_id, subprocess));

      // Line-oriented MPI channels to every worker rank.
      typedef utils::mpi_ostream ostream_type;
      typedef utils::mpi_istream_simple istream_type;

      typedef boost::shared_ptr<ostream_type> ostream_ptr_type;
      typedef boost::shared_ptr<istream_type> istream_ptr_type;

      typedef std::vector<ostream_ptr_type, std::allocator<ostream_ptr_type> > ostream_ptr_set_type;
      typedef std::vector<istream_ptr_type, std::allocator<istream_ptr_type> > istream_ptr_set_type;

      ostream_ptr_set_type ostream(mpi_size);
      istream_ptr_set_type istream(mpi_size);

      for (int rank = 1; rank < mpi_size; ++ rank) {
        ostream[rank].reset(new ostream_type(rank, line_tag, 4096));
        istream[rank].reset(new istream_type(rank, line_tag, 4096));
      }

      std::string line;
      value_type value(0, std::string());
      value_type value_recv(0, std::string());

      int non_found_iter = 0;

      // Main dispatch loop: runs until the consumer signals end-of-input
      // by queueing the id_type(-1) sentinel.
      while (value.first != id_type(-1)) {
        bool found = false;

        // Hand work to any worker whose channel is ready; pop(..., true)
        // is presumably the non-blocking form (TODO confirm), so an empty
        // input queue does not stall the loop.
        for (int rank = 1; rank < mpi_size && value.first != id_type(-1); ++ rank)
          if (ostream[rank]->test() && queue_is.pop(value, true) && value.first != id_type(-1)) {
            ostream[rank]->write(utils::lexical_cast<std::string>(value.first) + ' ' + value.second);
            found = true;
          }

        // Also feed the master's own subprocess when it is idle.
        if (queue_send.empty() && queue_is.pop(value, true) && value.first != id_type(-1)) {
          queue_send.push(value);
          found = true;
        }

        // reduce... collect processed lines coming back from the workers;
        // a failed read() means the worker closed its channel, so forward
        // the EOF sentinel downstream and drop the stream.
        for (int rank = 1; rank < mpi_size; ++ rank)
          if (istream[rank] && istream[rank]->test()) {
            if (istream[rank]->read(line)) {
              tokenize(line, value_recv);
              queue_recv.push_swap(value_recv);
            } else {
              queue_recv.push(std::make_pair(id_type(-1), std::string()));
              istream[rank].reset();
            }
            found = true;
          }

        // Back off (sleep/yield, presumably) when nothing progressed.
        non_found_iter = loop_sleep(found, non_found_iter);
      }

      // Input exhausted: shut everything down while still draining the
      // reduce channels.
      bool terminated = false;

      for (;;) {
        bool found = false;

        // Tell the local mapper to finish (non-blocking push; retried on
        // every iteration until it succeeds).
        if (! terminated && queue_send.push(std::make_pair(id_type(-1), std::string()), true)) {
          terminated = true;
          found = true;
        }

        // termination... run each worker channel's termination handshake,
        // then release the channel once it reports terminated.
        for (int rank = 1; rank < mpi_size; ++ rank)
          if (ostream[rank] && ostream[rank]->test()) {
            if (! ostream[rank]->terminated())
              ostream[rank]->terminate();
            else
              ostream[rank].reset();
            found = true;
          }

        // reduce... keep draining results until every worker sends EOF.
        for (int rank = 1; rank < mpi_size; ++ rank)
          if (istream[rank] && istream[rank]->test()) {
            if (istream[rank]->read(line)) {
              tokenize(line, value_recv);
              queue_recv.push_swap(value_recv);
            } else {
              queue_recv.push(std::make_pair(id_type(-1), std::string()));
              istream[rank].reset();
            }
            found = true;
          }

        // termination condition!  All channels released and the local
        // sentinel delivered.
        if (std::count(istream.begin(), istream.end(), istream_ptr_type()) == mpi_size
            && std::count(ostream.begin(), ostream.end(), ostream_ptr_type()) == mpi_size
            && terminated)
          break;

        non_found_iter = loop_sleep(found, non_found_iter);
      }

      mapper.join();
      reducer.join();
      consumer.join();
      merger.join();
    } else {
      // ---------------------- worker (rank > 0) ----------------------
      subprocess_type subprocess(command);

      queue_type    queue_send(1);  // work towards the subprocess
      queue_id_type queue_id;
      queue_type    queue_recv;     // results from the subprocess

      boost::thread mapper(mapper_type(queue_send, queue_id, subprocess));
      boost::thread reducer(reducer_type(queue_recv, queue_id, subprocess));

      // Mirror-image channel types: read work from rank 0, write results
      // back to rank 0.
      typedef utils::mpi_istream istream_type;
      typedef utils::mpi_ostream_simple ostream_type;

      boost::shared_ptr<istream_type> is(new istream_type(0, line_tag, 4096));
      boost::shared_ptr<ostream_type> os(new ostream_type(0, line_tag, 4096));

      std::string line;
      value_type value;

      bool terminated = false;

      int non_found_iter = 0;
      for (;;) {
        bool found = false;

        // Receive a work line from the master; channel EOF becomes the
        // id_type(-1) sentinel for the local mapper.
        if (is && is->test() && queue_send.empty()) {
          if (is->read(line))
            tokenize(line, value);
          else {
            value.first = id_type(-1);
            value.second = std::string();
            is.reset();
          }

          queue_send.push_swap(value);
          found = true;
        }

        if (! terminated) {
          // Forward results to the master until the reducer hands us the
          // sentinel.
          if (os && os->test() && queue_recv.pop_swap(value, true)) {
            if (value.first == id_type(-1))
              terminated = true;
            else
              os->write(utils::lexical_cast<std::string>(value.first) + ' ' + value.second);
            found = true;
          }
        } else {
          // Run the outgoing channel's termination handshake, then
          // release it.
          if (os && os->test()) {
            if (! os->terminated())
              os->terminate();
            else
              os.reset();
            found = true;
          }
        }

        // Done once both directions are closed.
        if (! is && ! os) break;

        non_found_iter = loop_sleep(found, non_found_iter);
      }

      mapper.join();
      reducer.join();
    }

    // synchronize... a hand-rolled barrier built from zero-byte
    // point-to-point notifications, so no rank exits while others are
    // still flushing.
    if (mpi_rank == 0) {
      std::vector<MPI::Request, std::allocator<MPI::Request> > request_recv(mpi_size);
      std::vector<MPI::Request, std::allocator<MPI::Request> > request_send(mpi_size);
      std::vector<bool, std::allocator<bool> > terminated_recv(mpi_size, false);
      std::vector<bool, std::allocator<bool> > terminated_send(mpi_size, false);

      // Rank 0 does not message itself; mark slot 0 done up front.
      terminated_recv[0] = true;
      terminated_send[0] = true;

      for (int rank = 1; rank != mpi_size; ++ rank) {
        request_recv[rank] = MPI::COMM_WORLD.Irecv(0, 0, MPI::INT, rank, notify_tag);
        request_send[rank] = MPI::COMM_WORLD.Isend(0, 0, MPI::INT, rank, notify_tag);
      }

      int non_found_iter = 0;
      for (;;) {
        bool found = false;

        for (int rank = 1; rank != mpi_size; ++ rank)
          if (! terminated_recv[rank] && request_recv[rank].Test()) {
            terminated_recv[rank] = true;
            found = true;
          }

        for (int rank = 1; rank != mpi_size; ++ rank)
          if (! terminated_send[rank] && request_send[rank].Test()) {
            terminated_send[rank] = true;
            found = true;
          }

        // Every pairwise exchange has completed.
        if (std::count(terminated_send.begin(), terminated_send.end(), true) == mpi_size
            && std::count(terminated_recv.begin(), terminated_recv.end(), true) == mpi_size)
          break;

        non_found_iter = loop_sleep(found, non_found_iter);
      }
    } else {
      // Worker side of the notification barrier.
      MPI::Request request_send = MPI::COMM_WORLD.Isend(0, 0, MPI::INT, 0, notify_tag);
      MPI::Request request_recv = MPI::COMM_WORLD.Irecv(0, 0, MPI::INT, 0, notify_tag);

      bool terminated_send = false;
      bool terminated_recv = false;

      int non_found_iter = 0;
      for (;;) {
        bool found = false;

        if (! terminated_send && request_send.Test()) {
          terminated_send = true;
          found = true;
        }

        if (! terminated_recv && request_recv.Test()) {
          terminated_recv = true;
          found = true;
        }

        if (terminated_send && terminated_recv) break;

        non_found_iter = loop_sleep(found, non_found_iter);
      }
    }
  }
  catch (const std::exception& err) {
    std::cerr << "error: " << argv[0] << " "<< err.what() << std::endl;
    // Abort the whole MPI job: other ranks would otherwise deadlock
    // waiting for this one in the barrier above.
    MPI::COMM_WORLD.Abort(1);
    return 1;
  }
  return 0;
}
// ikst_NotifyFunc
//
// Transport-event handler bridging the ikst_* transport layer to the
// ikss_Stream session: translates connect / EOF / error / read / write
// completions into client notifications via self->notifyFunc.
//
// Important: Any callback made here must always be in a tail
// position.
static void transportCallback(void *user_data, ikst_event *event)
{
  ikss_Stream *self = (ikss_Stream*)user_data;

  switch (event->event) {
  // We will not automatically send any headers or anything, but
  // we do provide methods for doing that. The stream should be
  // ready for messaging right away.
  case ikst_CONNECTED:
    {
      // Immediately queue the first read; on failure report an error
      // instead of CONNECTED.
      // NOTE(review): the error payload here is event->data0 (the
      // connect event's datum), not the failing request_recv() code
      // `ret` -- confirm this is intended.
      int ret = request_recv(self);
      if (ret) {
        (self->notifyFunc)(self->user_data, ikss_IKS_ERROR, event->data0);
      } else {
        (self->notifyFunc)(self->user_data, ikss_CONNECTED, 0);
      }
      break;
    }
  // Really, if the server is behaving nicely, it should send
  // </stream> before EOF, and that should trigger us closing the
  // connection before we get the EOF. But this matters little,
  // getting an EOF error here should lead to the same end result.
  case ikst_EOF:
  // It is not safe for us to delete ourselves or anything, but
  // the client may want to do that right away upon receiving this
  // event.
  case ikst_IKS_ERROR:
  case ikst_PLAT_ERROR:
    {
#if !defined(__SYMBIAN32__)
      // Debug aid on non-Symbian builds only.
      if (ikst_PLAT_ERROR == event->event) {
        printf("transport error: %s (%d)\n",
               strerror(event->data0), event->data0);
      }
#endif /* __SYMBIAN32__ */
      // Propagate the transport event code unchanged to the client.
      (self->notifyFunc)(self->user_data, event->event, event->data0);
      break;
    }
  case ikst_WRITE_OK:
    {
      // If there is anything left to send, resume sending,
      // otherwise propagate the event to the client so that it
      // knows it can send more messages.
      self->send_count += event->data0;  // data0 = bytes written this round
      if ((self->send_count < self->send_len) || self->send_more_buf) {
        int ret = send_more(self);
        if (ret) {
          (self->notifyFunc)(self->user_data, ikss_IKS_ERROR, ret);
        }
      } else {
        (self->notifyFunc)(self->user_data, ikss_SENT, 0);
      }
      break;
    }
  case ikst_READ_OK:
    {
      // Pass the read data to the parser, and if it was
      // syntactically fine, then put in another recv request.
      // NOTE(review): the short-circuit || collapses `ret` to 0/1, so
      // the concrete error code from handle_recv()/request_recv() is
      // not forwarded to the client -- confirm this is intended.
      int ret = (handle_recv (self, event->data0) || request_recv (self));
      if (ret) {
        (self->notifyFunc)(self->user_data, ikss_IKS_ERROR, ret);
      }
      break;
    }
  default:
    {
      // Unknown transport event: programmer error.
      assert(0);
      break;
    }
  }
}