int client()
{
    using GetSubTreeType = std::vector<std::pair<
        std::string,
        std::vector<std::pair<std::string, std::vector<std::string>>>>>;
    using message = sdbusplus::message::message;

    // setup connection to dbus
    boost::asio::io_context io;
    auto conn = std::make_shared<sdbusplus::asio::connection>(io);

    int ready = 0;
    while (!ready)
    {
        auto readyMsg = conn->new_method_call(
            "xyz.openbmc_project.asio-test", "/xyz/openbmc_project/test",
            "xyz.openbmc_project.test", "VoidFunctionReturnsInt");
        try
        {
            message intMsg = conn->call(readyMsg);
            intMsg.read(ready);
        }
        catch (sdbusplus::exception::SdBusError& e)
        {
            ready = 0;
            // pause to give the server a chance to start up
            usleep(10000);
        }
    }

    // test async method call and async send
    auto mesg =
        conn->new_method_call("xyz.openbmc_project.ObjectMapper",
                              "/xyz/openbmc_project/object_mapper",
                              "xyz.openbmc_project.ObjectMapper", "GetSubTree");

    static const auto depth = 2;
    static const std::vector<std::string> interfaces = {
        "xyz.openbmc_project.Sensor.Value"};
    mesg.append("/xyz/openbmc_project/Sensors", depth, interfaces);

    conn->async_send(mesg, [](boost::system::error_code ec, message& ret) {
        std::cout << "async_send callback\n";
        if (ec || ret.is_method_error())
        {
            std::cerr << "error with async_send\n";
            return;
        }
        GetSubTreeType data;
        ret.read(data);
        for (auto& item : data)
        {
            std::cout << item.first << "\n";
        }
    });

    conn->async_method_call(
        [](boost::system::error_code ec, GetSubTreeType& subtree) {
            std::cout << "async_method_call callback\n";
            if (ec)
            {
                std::cerr << "error with async_method_call\n";
                return;
            }
            for (auto& item : subtree)
            {
                std::cout << item.first << "\n";
            }
        },
        "xyz.openbmc_project.ObjectMapper",
        "/xyz/openbmc_project/object_mapper",
        "xyz.openbmc_project.ObjectMapper", "GetSubTree",
        "/org/openbmc/control", 2, std::vector<std::string>());

    // sd_events work too using the default event loop
    phosphor::Timer t1([]() { std::cerr << "*** tock ***\n"; });
    t1.start(std::chrono::microseconds(1000000));
    phosphor::Timer t2([]() { std::cerr << "*** tick ***\n"; });
    t2.start(std::chrono::microseconds(500000), true);
    // add the sd_event wrapper to the io object
    sdbusplus::asio::sd_event_wrapper sdEvents(io);

    // set up a client to make an async call to the server
    // using coroutines (userspace cooperative multitasking)
    boost::asio::spawn(io, [conn](boost::asio::yield_context yield) {
        do_start_async_method_call_one(conn, yield);
    });
    boost::asio::spawn(io, [conn](boost::asio::yield_context yield) {
        do_start_async_ipmi_call(conn, yield);
    });
    boost::asio::spawn(io, [conn](boost::asio::yield_context yield) {
        do_start_async_to_yield(conn, yield);
    });

    conn->async_method_call(
        [](boost::system::error_code ec, int32_t testValue) {
            if (ec)
            {
                std::cerr << "TestYieldFunction returned error with "
                             "async_method_call (ec = "
                          << ec << ")\n";
                return;
            }
            std::cout << "TestYieldFunction return " << testValue << "\n";
        },
        "xyz.openbmc_project.asio-test", "/xyz/openbmc_project/test",
        "xyz.openbmc_project.test", "TestYieldFunction", int32_t(41));

    io.run();

    return 0;
}
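/* The coroutine helpers spawned above (do_start_async_method_call_one,
 * do_start_async_ipmi_call, do_start_async_to_yield) are defined elsewhere
 * in the example. A minimal sketch of what such a helper can look like,
 * using sdbusplus's yield_method_call and the same test service names;
 * the real helpers in the example exercise other methods as well: */
static void do_start_async_method_call_one(
    std::shared_ptr<sdbusplus::asio::connection> conn,
    boost::asio::yield_context yield)
{
    boost::system::error_code ec;
    // yield_method_call suspends this coroutine until the reply arrives,
    // then returns the unpacked result
    int32_t testValue = conn->yield_method_call<int32_t>(
        yield, ec, "xyz.openbmc_project.asio-test",
        "/xyz/openbmc_project/test", "xyz.openbmc_project.test",
        "TestYieldFunction", int32_t(41));
    if (ec)
    {
        std::cerr << "TestYieldFunction failed via yield: " << ec << "\n";
        return;
    }
    std::cout << "TestYieldFunction (yield) return " << testValue << "\n";
}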
void CNet::DealSendEvent(struct iocp_event * pEvent)
{
    if (ERROR_SUCCESS == pEvent->nerron)
    {
        CConnection * pCConnection = (CConnection *)(pEvent->p);
        pCConnection->stream.out(pEvent->ioBytes);
        s32 nSize = pCConnection->stream.size();
        if (0 == nSize)
        {
            if (pCConnection->bShutdown)
            {
                SafeShutdwon(pEvent, CSD_SEND);
            }
            else
            {
                free_event(pEvent);
            }

            // the guard must be a named object; an unnamed temporary would
            // release the lock before nSDTags is updated
            CAutoLock lock(&(pCConnection->sdlock));
            pCConnection->nSDTags = pCConnection->nSDTags | CSD_SEND;
        }
        else
        {
            s32 err;
            pEvent->wbuf.buf = (char *)pCConnection->stream.buff();
            pEvent->wbuf.len = nSize;
            if (ERROR_NO_ERROR != async_send(pEvent, &err, pEvent->p))
            {
                SafeShutdwon(pEvent, CSD_SEND);
            }
        }
    }
    else
    {
        SafeShutdwon(pEvent, CSD_SEND);
    }
}
void CNet::CSend(const s32 nConnectID, const void * pData, const s32 nSize)
{
    CConnection * pCConnection = m_ConnectPool[nConnectID];
    if (pCConnection->bShutdown)
    {
        return;
    }

    s32 size = pCConnection->stream.size();
    pCConnection->stream.in(pData, nSize);
    if (0 == size)
    {
        struct iocp_event * pEvent = malloc_event();
        ASSERT(pEvent);
        pEvent->event = EVENT_ASYNC_SEND;
        pEvent->wbuf.buf = (char *)pCConnection->stream.buff();
        pEvent->wbuf.len = nSize;
        pEvent->p = pCConnection;
        pEvent->s = pCConnection->s;

        s32 err;
        {
            // hold sdlock while clearing the send tag (named guard, see above)
            CAutoLock lock(&(pCConnection->sdlock));
            pCConnection->nSDTags = pCConnection->nSDTags & CSD_RECV;
        }
        if (ERROR_NO_ERROR != async_send(pEvent, &err, pCConnection))
        {
            // some problem in here must be killed, like 10054
            NET_DEBUG("async_send failed, err %d", err);
            SafeShutdwon(pEvent, CSD_SEND);
        }
    }
}
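/* Both CNet functions above rely on a scoped-lock helper. A minimal sketch
 * of such an RAII guard (hypothetical; the real CAutoLock/CLock types are
 * not shown in this listing). The guard must be given a name at the call
 * site -- `CAutoLock lock(&m);` -- because an unnamed temporary such as
 * `CAutoLock(&m);` is destroyed at the end of the full expression and
 * releases the lock immediately. */
class CAutoLock
{
public:
    explicit CAutoLock(CLock * pLock) : m_pLock(pLock)
    {
        m_pLock->Lock();   // acquire on construction
    }
    ~CAutoLock()
    {
        m_pLock->Unlock(); // release when the enclosing scope ends
    }
private:
    CAutoLock(const CAutoLock &);             // non-copyable
    CAutoLock & operator=(const CAutoLock &);
    CLock * m_pLock;
};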
void AsyncConnection::HandleConnect(const MessagePtr msg,
                                    const boost::system::error_code& e)
{
    if (!e)
    {
        m_app->HandleConnect(Connection::shared_from_this());
        Start();
        async_send(boost::asio::buffer(msg->Data(), msg->Length()),
                   m_strand.wrap(boost::bind(&AsyncConnection::HandleSend,
                                             this, msg,
                                             boost::asio::placeholders::error,
                                             false)));
    }
}
void AsyncConnection::Send(const MessagePtr msg)
{
    if (is_open())
    {
        async_send(boost::asio::buffer(msg->Data(), msg->Length()),
                   m_strand.wrap(boost::bind(&AsyncConnection::HandleSend,
                                             this, msg,
                                             boost::asio::placeholders::error,
                                             true)));
    }
    else
    {
        boost::asio::ip::tcp::endpoint endpoint(
            boost::asio::ip::address::from_string(m_ip), m_port);
        async_connect(endpoint,
                      m_strand.wrap(boost::bind(&AsyncConnection::HandleConnect,
                                                this, msg,
                                                boost::asio::placeholders::error)));
    }
}
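/* The completion handler bound in the two methods above is not shown in
 * this listing. A minimal sketch inferred from the bind signature
 * (msg, error, bool) -- hypothetical: Close() and m_app->HandleSend()
 * are assumed names, and the real handler's error policy may differ. */
void AsyncConnection::HandleSend(const MessagePtr msg,
                                 const boost::system::error_code& e,
                                 bool notifyApp)
{
    if (e)
    {
        Close(); // assumed: drop the connection so a later Send() reconnects
        return;
    }
    if (notifyApp)
    {
        // assumed application hook, mirroring m_app->HandleConnect() above
        m_app->HandleSend(Connection::shared_from_this(), msg);
    }
}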
void mux_poller::poll()
{
    static const char *funcname {"snmp::mux_poller::poll"};
    if (0 == tasks.size()) return;

    int fds, block;
    fd_set fdset;
    timeval timeout;
    taskdata::iterator it = tasks.begin();

    for (unsigned active_hosts;;)
    {
        active_hosts = sessions.size();
        if (active_hosts < max_hosts)
        {
            if (0 == active_hosts and it == tasks.end()) return;

            unsigned delta = max_hosts - active_hosts;
            void *sessp;

            for (unsigned i = 0; it != tasks.end() and i < delta; ++i, ++it)
            {
                polltask &task = it->second;
                sessp = init_snmp_session(it->first.c_str(),
                                          task.community.c_str(),
                                          task.version, callback_wrap,
                                          static_cast<void *>(&task));
                sessions.emplace_front(sessp);
                task.pdata = &(sessions.front());

                try { async_send(sessp, snmp_clone_pdu(task.request)); }
                catch (snmprun_error &error)
                {
                    throw snmprun_error {errtype::runtime, funcname,
                                         "poll failed: %s", error.what()};
                }
            }
        }

        fds = block = 0;
        FD_ZERO(&fdset);

        for (auto &sess : sessions)
            snmp_sess_select_info(sess.sessp, &fds, &fdset, &timeout, &block);

        if (0 > (fds = select(fds, &fdset, nullptr, nullptr, &timeout)))
            throw snmprun_error {errtype::runtime, funcname,
                                 "select() failed: %s", strerror(errno)};

        if (fds)
        {
            for (auto &sess : sessions) snmp_sess_read(sess.sessp, &fdset);
        }
        else
        {
            for (auto &sess : sessions) snmp_sess_timeout(sess.sessp);
        }

        sessions.remove_if([](const polldata &p) {
            return (pollstate::finished == p.state); });
    }
}
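/* Sessions are reaped by the remove_if() above once their polldata state
 * reaches pollstate::finished. A minimal sketch of how the callback_wrap
 * passed to init_snmp_session() might flip that state, using the standard
 * net-snmp callback signature (hypothetical; the real handler also parses
 * the response PDU and may issue follow-up requests): */
int callback_wrap(int operation, netsnmp_session *, int /*reqid*/,
                  netsnmp_pdu * /*pdu*/, void *magic)
{
    polltask *task = static_cast<polltask *>(magic);
    if (NETSNMP_CALLBACK_OP_RECEIVED_MESSAGE == operation)
    {
        // handle the response here, then mark the session for reaping
        task->pdata->state = pollstate::finished;
    }
    else if (NETSNMP_CALLBACK_OP_TIMED_OUT == operation)
    {
        task->pdata->state = pollstate::finished; // give up on this host
    }
    return 1; // tell net-snmp the PDU was handled
}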
//! Main MPI loop - only one can exist within a program.
void mpi_controller::send_recv(){
    static int ms_sleep_for = 100;

    // Don't combine into one big statement: separate statements (with the
    // non-short-circuiting `|`) guarantee that all three calls are made.
    bool work_done = async_recv();
    work_done = handle_events() | work_done;
    work_done = async_send() | work_done;

    // If there was nothing to send or receive, back off (up to 5 seconds)
    // and try again. This frees up the core for something else.
    if(!work_done){
        ms_sleep_for = std::min(int(ms_sleep_for*1.5), 5000);
        std::this_thread::sleep_for(std::chrono::milliseconds(ms_sleep_for));
    }
    else{
        ms_sleep_for = 100;
    }
}
/**
 * Flush as many messages as possible without blocking from
 * the send q to the specified node.
 */
static void flush_send_q_node(long node)
{
    while (TCGMSG_proc_info[node].sendq) {

        if (!async_send(TCGMSG_proc_info[node].sendq)) {
            /* Send is incomplete ... stop processing this q */
            break;
        }
        else {
            SendQEntry *tmp = TCGMSG_proc_info[node].sendq;

            TCGMSG_proc_info[node].sendq =
                (SendQEntry *) TCGMSG_proc_info[node].sendq->next;
            if (tmp->free_buf_on_completion)
                (void) free(tmp->buf);
            tmp->active = false; /* Matches NewSendQEntry() */
        }
    }
}
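/* A plausible shape for the queue entries drained above (hypothetical;
 * the real SendQEntry in TCGMSG carries additional bookkeeping such as
 * the destination node and message tag): */
typedef struct SendQEntry {
    struct SendQEntry *next;    /* singly-linked list of pending sends */
    char *buf;                  /* message payload */
    long nbytes;                /* payload length */
    int free_buf_on_completion; /* free(buf) once fully sent? */
    int active;                 /* slot in use; cleared to recycle it */
} SendQEntry;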
void test_send_receive_async_threads()
{
    boost::system::error_code ecc;
    size_t btc = 0;
    boost::system::error_code ecb;
    size_t btb = 0;
    {
        std::array<char, 2> a;
        std::array<char, 2> b;

        std::array<boost::asio::mutable_buffer, 2> rcv_bufs = {{
            boost::asio::buffer(a),
            boost::asio::buffer(b)
        }};

        boost::asio::io_service ios;
        auto s = azmq::thread::fork(ios, [&](azmq::socket & ss) {
            ss.async_receive(rcv_bufs,
                [&](boost::system::error_code const& ec,
                    size_t bytes_transferred) {
                    ecb = ec;
                    btb = bytes_transferred;
                    ios.stop();
                }, ZMQ_RCVMORE);
            ss.get_io_service().run();
        });

        s.async_send(snd_bufs,
            [&](boost::system::error_code const& ec,
                size_t bytes_transferred) {
                ecc = ec;
                btc = bytes_transferred;
            }, ZMQ_SNDMORE);

        boost::asio::io_service::work w(ios);
        ios.run();
    }
    BOOST_ASSERT_MSG(!ecc, "!ecc");
    BOOST_ASSERT_MSG(btc == 4, "btc != 4");
    BOOST_ASSERT_MSG(!ecb, "!ecb");
    BOOST_ASSERT_MSG(btb == 4, "btb != 4");
}
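/* snd_bufs is defined at file scope in the test suite and is not shown
 * above. For the byte counts asserted (btc == btb == 4) it would be two
 * 2-byte const buffers, e.g. (a sketch, assuming the string literals
 * include their terminating NULs): */
std::array<boost::asio::const_buffer, 2> snd_bufs = {{
    boost::asio::buffer("A"), // 2 bytes: 'A', '\0'
    boost::asio::buffer("B")  // 2 bytes: 'B', '\0'
}};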
socket1.send(buffer(const_char_buffer));
socket1.send(null_buffers());
socket1.send(buffer(mutable_char_buffer), in_flags);
socket1.send(buffer(const_char_buffer), in_flags);
socket1.send(null_buffers(), in_flags);
socket1.send(buffer(mutable_char_buffer), in_flags, ec);
socket1.send(buffer(const_char_buffer), in_flags, ec);
socket1.send(null_buffers(), in_flags, ec);

socket1.async_send(buffer(mutable_char_buffer), send_handler());
socket1.async_send(buffer(const_char_buffer), send_handler());
socket1.async_send(null_buffers(), send_handler());
socket1.async_send(buffer(mutable_char_buffer), in_flags, send_handler());
socket1.async_send(buffer(const_char_buffer), in_flags, send_handler());
socket1.async_send(null_buffers(), in_flags, send_handler());
int i4 = socket1.async_send(buffer(mutable_char_buffer), lazy);
(void)i4;
int i5 = socket1.async_send(buffer(const_char_buffer), lazy);
(void)i5;
int i6 = socket1.async_send(null_buffers(), lazy);
(void)i6;
int i7 = socket1.async_send(buffer(mutable_char_buffer), in_flags, lazy);
(void)i7;
int i8 = socket1.async_send(buffer(const_char_buffer), in_flags, lazy);
(void)i8;
int i9 = socket1.async_send(null_buffers(), in_flags, lazy);
(void)i9;

socket1.send_to(buffer(mutable_char_buffer),
    ip::udp::endpoint(ip::udp::v4(), 0));
socket1.send_to(buffer(mutable_char_buffer),
int ploop_copy_send(struct ploop_copy_send_param *arg)
{
    struct delta idelta = { .fd = -1 };
    int tracker_on = 0;
    int fs_frozen = 0;
    int devfd = -1;
    int mntfd = -1;
    int ret = 0;
    char *send_from = NULL;
    char *format = NULL;
    void *iobuf[2] = {};
    int blocksize;
    __u64 cluster;
    __u64 pos;
    __u64 iterpos;
    __u64 trackpos;
    __u64 trackend;
    __u64 xferred;
    int iter;
    struct ploop_track_extent e;
    int i;
    pthread_t send_th = 0;
    struct send_data sd = {
        .mutex = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .cond_sent = PTHREAD_COND_INITIALIZER,
    };

    if (!arg)
        return SYSEXIT_PARAM;

    sd.fd = arg->ofd;
    sd.is_pipe = is_fd_pipe(arg->ofd);
    if (sd.is_pipe < 0) {
        ploop_err(0, "Invalid output fd %d: must be a file, "
                "a pipe or a socket", arg->ofd);
        return SYSEXIT_PARAM;
    }

    if (arg->feedback_fd >= 0 && is_fd_pipe(arg->feedback_fd) != 1) {
        ploop_err(errno, "Invalid feedback fd %d: must be "
                "a pipe or a socket", arg->feedback_fd);
        return SYSEXIT_PARAM;
    }

    /* If data is to be sent to stdout or stderr,
     * we have to disable logging to the appropriate fd.
     *
     * As currently there's no way to disable just stderr,
     * in this case we have to disable stdout as well.
     */
    if (arg->ofd == STDOUT_FILENO)
        ploop_set_verbose_level(PLOOP_LOG_NOSTDOUT);
    else if (arg->ofd == STDERR_FILENO)
        ploop_set_verbose_level(PLOOP_LOG_NOCONSOLE);

    devfd = open(arg->device, O_RDONLY);
    if (devfd < 0) {
        ploop_err(errno, "Can't open device %s", arg->device);
        ret = SYSEXIT_DEVICE;
        goto done;
    }

    mntfd = open_mount_point(arg->device);
    if (mntfd < 0) {
        /* Error is printed by open_mount_point() */
        ret = SYSEXIT_OPEN;
        goto done;
    }

    ret = get_image_info(arg->device, &send_from, &format, &blocksize);
    if (ret)
        goto done;
    cluster = S2B(blocksize);

    ret = SYSEXIT_MALLOC;
    for (i = 0; i < 2; i++)
        if (p_memalign(&iobuf[i], 4096, cluster))
            goto done;

    ret = complete_running_operation(NULL, arg->device);
    if (ret)
        goto done;

    ret = ioctl_device(devfd, PLOOP_IOC_TRACK_INIT, &e);
    if (ret)
        goto done;
    tracker_on = 1;

    if (open_delta_simple(&idelta, send_from, O_RDONLY|O_DIRECT, OD_NOFLAGS)) {
        ret = SYSEXIT_OPEN;
        goto done;
    }

    ret = pthread_create(&send_th, NULL, send_thread, &sd);
    if (ret) {
        ploop_err(ret, "Can't create send thread");
        ret = SYSEXIT_SYS;
        goto done;
    }

    ploop_log(-1, "Sending %s", send_from);

    trackend = e.end;
    for (pos = 0; pos < trackend; ) {
        int n;

        trackpos = pos + cluster;
        ret = ioctl_device(devfd, PLOOP_IOC_TRACK_SETPOS, &trackpos);
        if (ret)
            goto done;

        n = do_pread(cluster, pos);
        if (n == 0) /* EOF */
            break;

        async_send(n, pos);
        pos += n;
    }

    /* First copy done */
    iter = 1;
    iterpos = 0;
    xferred = 0;

    for (;;) {
        int err;

        err = ioctl(devfd, PLOOP_IOC_TRACK_READ, &e);
        if (err == 0) {
            //fprintf(stderr, "TRACK %llu-%llu\n", e.start, e.end); fflush(stdout);

            if (e.end > trackend)
                trackend = e.end;
            if (e.start < iterpos)
                iter++;
            iterpos = e.end;
            xferred += e.end - e.start;

            for (pos = e.start; pos < e.end; ) {
                int n;
                int copy = e.end - pos;

                if (copy > cluster)
                    copy = cluster;
                if (pos + copy > trackpos) {
                    trackpos = pos + copy;
                    if (ioctl(devfd, PLOOP_IOC_TRACK_SETPOS, &trackpos)) {
                        ploop_err(errno, "PLOOP_IOC_TRACK_SETPOS");
                        ret = SYSEXIT_DEVIOC;
                        goto done;
                    }
                }
                n = do_pread(copy, pos);
                if (n == 0) {
                    ploop_err(0, "Unexpected EOF");
                    ret = SYSEXIT_READ;
                    goto done;
                }
                async_send(n, pos);
                pos += n;
            }
        } else {
            if (errno == EAGAIN) /* no more dirty blocks */
                break;
            ploop_err(errno, "PLOOP_IOC_TRACK_READ");
            ret = SYSEXIT_DEVIOC;
            goto done;
        }
        if (iter > 10 || (iter > 1 && xferred > trackend))
            break;
    }

    /* Live iterative transfers are done. Either we transferred
     * everything or iterations did not converge. In any case
     * now we must suspend VE disk activity. Now it is just
     * a call of an external program (something sort of
     * "killall -9 writetest; sleep 1; umount /mnt2"); the actual
     * implementation must be integrated into vzctl/vzmigrate
     * and suspend the VE with subsequent fsyncing of the FS.
     */

    /* Send the sync command to the receiving side. Since older ploop
     * might be present on the other side, we need to not break
     * backward compatibility, so just send the first few (SYNC_MARK)
     * bytes of delta file contents. New ploop_receive() interprets
     * this as a "sync me" command, while the old one just writes those
     * bytes, which is useless but harmless.
     */
    if (sd.is_pipe) {
        char buf[LEN_STATUS + 1] = {};

        ret = do_pread(4096, 0);
        if (ret < SYNC_MARK) {
            ploop_err(errno, "Short read");
            ret = SYSEXIT_READ;
            goto done;
        }
        TS("SEND 0 %d (sync)", SYNC_MARK);
        async_send(SYNC_MARK, 0);

        /* Now we should wait for the other side to finish syncing
         * before freezing the container, to optimize CT frozen time.
         */
        if (arg->feedback_fd < 0) {
            /* No descriptor to receive a response back is given.
             * As ugly as it looks, let's just sleep for some time
             * hoping the other side will finish sync.
             */
            TS("SLEEP 5");
            sleep(5);
            goto sync_done;
        }

        /* Wait for feedback from the receiving side */
        /* FIXME: use select/poll with a timeout */
        if (read(arg->feedback_fd, buf, LEN_STATUS) != LEN_STATUS) {
            ploop_err(errno, "Can't read feedback");
            ret = SYSEXIT_PROTOCOL;
            goto done;
        }

        if (strncmp(buf, STATUS_OK, LEN_STATUS) == 0) {
            goto sync_done;
        } else if (strncmp(buf, STATUS_FAIL, LEN_STATUS) == 0) {
            ploop_err(0, "Remote side reported sync failure");
            ret = SYSEXIT_FSYNC;
            goto done;
        } else {
            ploop_err(0, "Got back feedback: %s", buf);
            ret = SYSEXIT_PROTOCOL;
            goto done;
        }
    } else {
        /* Writing to local file */
        fdatasync(arg->ofd);
    }

sync_done:
    /* Freeze the container */
    TS("FLUSH");
    ret = run_cmd(arg->flush_cmd);
    if (ret)
        goto done;

    /* Sync fs */
    TS("SYNCFS");
    if (sys_syncfs(mntfd)) {
        ploop_err(errno, "syncfs() failed");
        ret = SYSEXIT_FSYNC;
        goto done;
    }

    /* Flush journal and freeze fs (this also clears the fs dirty bit) */
    TS("FIFREEZE");
    ret = ioctl_device(mntfd, FIFREEZE, 0);
    if (ret)
        goto done;
    fs_frozen = 1;

    TS("IOC_SYNC");
    ret = ioctl_device(devfd, PLOOP_IOC_SYNC, 0);
    if (ret)
        goto done;

    iter = 1;
    iterpos = 0;

    for (;;) {
        int err;
        struct ploop_track_extent e;

        err = ioctl(devfd, PLOOP_IOC_TRACK_READ, &e);
        if (err == 0) {
            __u64 pos;

            //fprintf(stderr, "TRACK %llu-%llu\n", e.start, e.end); fflush(stdout);

            if (e.end > trackend)
                trackend = e.end;
            if (e.start < iterpos)
                iter++;
            iterpos = e.end;

            for (pos = e.start; pos < e.end; ) {
                int n;
                int copy = e.end - pos;

                if (copy > cluster)
                    copy = cluster;
                if (pos + copy > trackpos) {
                    trackpos = pos + copy;
                    ret = ioctl(devfd, PLOOP_IOC_TRACK_SETPOS, &trackpos);
                    if (ret)
                        goto done;
                }
                TS("READ %llu %d", pos, copy);
                n = do_pread(copy, pos);
                if (n == 0) {
                    ploop_err(0, "Unexpected EOF");
                    ret = SYSEXIT_READ;
                    goto done;
                }
                TS("SEND %llu %d", pos, n);
                async_send(n, pos);
                pos += n;
            }
        } else {
            if (errno == EAGAIN)
                break;
            ploop_err(errno, "PLOOP_IOC_TRACK_READ");
            ret = SYSEXIT_DEVIOC;
            goto done;
        }
        if (iter > 2) {
            ploop_err(0, "Too many iterations on frozen FS, aborting");
            ret = SYSEXIT_LOOP;
            goto done;
        }
    }

    /* Must clear dirty flag on ploop1 image. */
    if (strcmp(format, "ploop1") == 0) {
        int n;
        struct ploop_pvd_header *vh;

        TS("READ 0 4096");
        n = do_pread(4096, 0);
        if (n < SECTOR_SIZE) {
            ploop_err(errno, "Short read");
            ret = SYSEXIT_READ;
            goto done;
        }

        vh = iobuf[i]; /* the buffer just filled by do_pread() */
        vh->m_DiskInUse = 0;

        TS("SEND 0 %d (1st sector)", SECTOR_SIZE);
        async_send(SECTOR_SIZE, 0);
    }

    TS("IOCTL TRACK_STOP");
    ret = ioctl(devfd, PLOOP_IOC_TRACK_STOP, 0);
    if (ret)
        goto done;
    tracker_on = 0;

    TS("SEND 0 0 (close)");
    async_send(0, 0);

    pthread_join(send_th, NULL);
    send_th = 0;

done:
    if (send_th)
        pthread_cancel(send_th);
    if (fs_frozen)
        (void)ioctl_device(mntfd, FITHAW, 0);
    if (tracker_on)
        (void)ioctl_device(devfd, PLOOP_IOC_TRACK_ABORT, 0);
    free(iobuf[0]);
    free(iobuf[1]);
    if (devfd >= 0)
        close(devfd);
    if (mntfd >= 0)
        close(mntfd);
    free(send_from);
    if (idelta.fd >= 0)
        close_delta(&idelta);

    TS("DONE");
    return ret;
}
#undef do_pread
#undef async_send

/* Deprecated, please use ploop_copy_send() instead */
int ploop_send(const char *device, int ofd, const char *flush_cmd,
        int is_pipe)
{
    struct ploop_copy_send_param s = {
        .device = device,
        .ofd = ofd,
        .flush_cmd = flush_cmd,
    };

    return ploop_copy_send(&s);
}
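/* The do_pread() and async_send() used throughout ploop_copy_send() are
 * file-local macros (note the #undef lines above); their definitions are
 * not part of this excerpt. A rough sketch of their shape, assuming an
 * nread() helper and a send_buf() that hands a buffer to send_thread via
 * `sd`, with the two iobuf[] buffers alternated through the local `i`
 * (hypothetical reconstruction -- details may differ): */
#define do_pread(size, pos)                                     \
({                                                              \
    int __n = nread(idelta.fd, iobuf[i], (size), (pos));        \
    if (__n == -1) {                                            \
        ploop_err(errno, "Error from nread");                   \
        ret = SYSEXIT_READ;                                     \
        goto done;                                              \
    }                                                           \
    __n;                                                        \
})

#define async_send(len, position)                               \
do {                                                            \
    if (send_buf(&sd, iobuf[i], (len), (position))) {           \
        ploop_err(errno, "write");                              \
        ret = SYSEXIT_WRITE;                                    \
        goto done;                                              \
    }                                                           \
    i = !i; /* switch to the other buffer for the next read */  \
} while (0)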
ssize_t
MulticastSendStrategy::send_bytes_i(const iovec iov[], int n)
{
    return (this->link_->config()->async_send()
            ? async_send(iov, n)
            : sync_send(iov, n));
}