int CZmqThread::init_socket(){ int rc; void *ctx=zmq_gcontext_get(); m_socket = zmq_socket (ctx, ZMQ_SUB); if (!m_socket) { printf ("error in zmq_socket: %s\n", zmq_strerror (errno)); return -1; } rc = zmq_connect (m_socket, ( char *)m_host.toUtf8().data()); if (rc != 0) { printf ("error in zmq_connect: %s\n", zmq_strerror (errno)); return -1; } rc=zmq_setsockopt (m_socket, ZMQ_SUBSCRIBE, 0,0); if (rc != 0) { printf ("error in zmq_setoption : %s\n", zmq_strerror (errno)); return -1; } init_msg(); return (0); }
/**
 * Send buflen bytes on sock, retrying on transient errors (EINTR/EAGAIN)
 * until the message is sent or MessagingInterface::aborted() becomes true.
 * On EFSM (req/rep state-machine violation) the exception is rethrown after
 * a short sleep so the caller can reset the socket.
 */
void safeSend(zmq::socket_t &sock, const char *buf, size_t buflen) {
    char tnam[100];
    int pgn_rc = pthread_getname_np(pthread_self(),tnam, 100);
    assert(pgn_rc == 0);
    //if (buflen>10){FileLogger fl(program_name); fl.f() << tnam << " sending " << buf << "\n"; }
    while (!MessagingInterface::aborted()) {
        try {
            zmq::message_t msg(buflen);
            memcpy(msg.data(), buf, buflen );
            sock.send(msg);
            break;
        }
        catch (const zmq::error_t &) {   /* fix: catch by const reference, not by value */
            if (zmq_errno() != EINTR && zmq_errno() != EAGAIN) {
                {
                    FileLogger fl(program_name);
                    fl.f() << tnam << " safeSend error " << errno << " " << zmq_strerror(errno) << "\n";
                }
                if (zmq_errno() == EFSM) {
                    usleep(1000);
                    throw;   /* caller must recover the req/rep state machine */
                }
                usleep(10);
                continue;
            }
            else {
                /* transient error: log and retry */
                std::cerr << tnam << " safeSend error " << errno << " " << zmq_strerror(errno) << "\n";
                usleep(10);
            }
        }
    }
}
/* Ruby binding: send a String message on the wrapped ZeroMQ socket.
 * Returns Qtrue on success, Qfalse when the send would block (EAGAIN),
 * and raises RuntimeError on any other error.
 *
 * Fixes:
 *  - rb_raise was given an arbitrary string as its printf FORMAT argument
 *    (format-string bug); now passed through "%s".
 *  - rb_raise longjmps out of the function, so the zmq_msg_close calls that
 *    followed it were unreachable and leaked the message; the message is now
 *    closed BEFORE raising. */
static VALUE socket_send (VALUE self_, VALUE msg_, VALUE flags_)
{
    assert (DATA_PTR (self_));
    Check_Type (msg_, T_STRING);

    zmq_msg_t msg;
    int rc = zmq_msg_init_size (&msg, RSTRING_LEN (msg_));
    if (rc != 0) {
        rb_raise (rb_eRuntimeError, "%s", zmq_strerror (errno));
        return Qnil;   /* not reached; rb_raise does not return */
    }
    memcpy (zmq_msg_data (&msg), RSTRING_PTR (msg_), RSTRING_LEN (msg_));

    rc = zmq_send (DATA_PTR (self_), &msg, NUM2INT (flags_));
    if (rc != 0 && errno == EAGAIN) {
        rc = zmq_msg_close (&msg);
        assert (rc == 0);
        return Qfalse;
    }
    if (rc != 0) {
        int err = errno;            /* zmq_msg_close may clobber errno */
        rc = zmq_msg_close (&msg);  /* release the message before the longjmp */
        assert (rc == 0);
        rb_raise (rb_eRuntimeError, "%s", zmq_strerror (err));
        return Qnil;   /* not reached */
    }

    rc = zmq_msg_close (&msg);
    assert (rc == 0);
    return Qtrue;
}
/**
 * Construct the asynchronous job router: a PULL input / PUSH output frame
 * processor. Warns if the atomics used for job bookkeeping are not lock-free,
 * then connects the processor input/output sockets to their inproc endpoints.
 */
COLD AsyncJobRouter::AsyncJobRouter(void* ctxArg, Tablespace& tablespaceArg) :
    AbstractFrameProcessor(ctxArg, ZMQ_PULL, ZMQ_PUSH, "Async job router"),
    processSocketMap(),
    processThreadMap(),
    apTerminationInfo(),
    scrubJobsRequested(),
    apidGenerator("next-apid.txt"),
    ctx(ctxArg),
    tablespace(tablespaceArg) {
    //Print warnings if not using lockfree atomics
    std::atomic<bool> boolAtomic;
    std::atomic<unsigned int> uintAtomic;
    std::atomic<uint64_t> uint64Atomic;
    if(!atomic_is_lock_free(&boolAtomic)) {
        logger.warn("atomic<bool> is not lockfree, some operations might be slower than expected");
    }
    if(!atomic_is_lock_free(&uintAtomic)) {
        logger.warn("atomic<unsigned int> is not lockfree, some operations might be slower than expected");
    }
    if(!atomic_is_lock_free(&uint64Atomic)) {
        logger.warn("atomic<uint64_t> is not lockfree, some operations might be slower than expected");
    }
    //Connect the socket that is used by the send() member function
    //(fix: the log messages previously said "bind" although these are connects)
    if(unlikely(zmq_connect(processorInputSocket, asyncJobRouterAddr) == -1)) {
        logger.critical("Failed to connect processor input socket: " + std::string(zmq_strerror(errno)));
    }
    //Connect the socket that is used to proxy requests to the external req/rep socket
    if(unlikely(zmq_connect(processorOutputSocket, externalRequestProxyEndpoint) == -1)) {
        logger.critical("Failed to connect processor output socket: " + std::string(zmq_strerror(errno)));
    }
    logger.debug("Asynchronous job router starting up");
}
/** * Initialize the context, socket and connect to the bound address * @return * true when successful */ bool BoomStick::Initialize() { if (nullptr != mCtx) { return true; } mCtx = GetNewContext(); if (nullptr == mCtx) { LOG(WARNING) << "queue error " << zmq_strerror(zmq_errno()); return false; } mChamber = GetNewSocket(mCtx); // The memory in this pointer is managed by the context and should not be deleted if (nullptr == mChamber) { LOG(WARNING) << "queue error " << zmq_strerror(zmq_errno()); zctx_destroy(&mCtx); mCtx = nullptr; return false; } if (!ConnectToBinding(mChamber, mBinding)) { zctx_destroy(&mCtx); mChamber = nullptr; mCtx = nullptr; return false; } return true; }
void _addConnection( const std::string& zmqURI ) { _subscribers[zmqURI] = zmq_socket( _context, ZMQ_SUB ); if( zmq_connect( _subscribers[zmqURI], zmqURI.c_str( )) == -1 ) { zmq_close( _subscribers[zmqURI] ); _subscribers.erase( zmqURI ); LBTHROW( std::runtime_error( "Cannot connect subscriber to " + zmqURI + ", got " + zmq_strerror( zmq_errno( )))); } if( zmq_setsockopt( _subscribers[zmqURI], ZMQ_SUBSCRIBE, "", 0 ) == -1 ) { zmq_close( _subscribers[zmqURI] ); _subscribers.erase( zmqURI ); LBTHROW( std::runtime_error( std::string( "Cannot set subscriber, got " ) + zmq_strerror( zmq_errno( )))); } zmq_pollitem_t entry; entry.socket = _subscribers[zmqURI]; entry.events = ZMQ_POLLIN; _entries.push_back( entry ); }
/**
 * Build the endpoint string from address/port, lazily create the shared
 * ZeroMQ context (one I/O thread) and open a socket of the requested type.
 *
 * @param type ZeroMQ socket type (ZMQ_REQ, ZMQ_SUB, ...)
 * @return success, or a failing Return carrying the ZeroMQ error text
 */
Return<std::string> Zmq::init(const int type) {
    std::stringstream portStream;
    portStream << port;
    endPoint = address + ":" + portStream.str();
    std::cout << "endPoint " << endPoint << std::endl;

    // Create the process-wide context exactly once.
    if (NULL == Zmq::context) {
        Zmq::context = zmq_init(1);
        if (NULL == Zmq::context) {
            return Return<std::string> (false, "Failed to init zmq with 1 thread (" + std::string(zmq_strerror(zmq_errno())) +")");
        }
    }

    socket = zmq_socket(Zmq::context, type);
    if (NULL == socket) {
        return Return<std::string> (false, "Failed to to create socket (" + std::string(zmq_strerror(zmq_errno())) +")");
    }
    return true;
}
bool Van::connect(const Node &node) { CHECK(node.has_id()) << node.ShortDebugString(); CHECK(node.has_port()) << node.ShortDebugString(); CHECK(node.has_hostname()) << node.ShortDebugString(); NodeID id = node.id(); // If the node.id is the same as myNode_.id, then we need to update current // node information. This new node information generally comes from scheduler. if (id == myNode_.id()) { myNode_ = node; } if (senders_.find(id) != senders_.end()) { return true; } void *sender = zmq_socket(context_, ZMQ_DEALER); CHECK(sender != nullptr) << zmq_strerror(errno); std::string myId = myNode_.id(); zmq_setsockopt(sender, ZMQ_IDENTITY, myId.data(), myId.size()); std::string addr = "tcp://" + node.hostname() + ":" + std::to_string(node.port()); if (FLAGS_local) { addr = "ipc:///tmp/" + node.id(); } if (zmq_connect(sender, addr.c_str()) != 0) { LOG(WARNING) << "connect to " + addr + " failed: " + zmq_strerror(errno); return false; } senders_[id] = sender; hostnames_[id] = node.hostname(); VLOG(1) << "Connect to " << id << " [" << addr << "]"; return true; }
/* Bind a ZMQ_REP socket at service->address, register the built-in RPC
 * methods, and hook the socket's file descriptor into the libev loop so
 * rpc_service_poll runs when the socket becomes readable.
 * Any ZeroMQ setup failure aborts via insist(). */
void rpc_service_start(rpc_service_t *service, struct ev_loop *ev) {
  printf("Starting RPC service on %s\n", service->address);

  void *sock = zmq_socket(service->zmq, ZMQ_REP);
  insist(sock != NULL, "zmq_socket returned NULL. zmq error(%d): %s",
         zmq_errno(), zmq_strerror(zmq_errno()));

  int rc = zmq_bind(sock, service->address);
  insist(rc == 0, "zmq_bind(\"%s\") returned %d (I expected: 0). zmq error(%d): %s",
         service->address, rc, zmq_errno(), zmq_strerror(zmq_errno()));

  /* TODO(sissel): Turn this 'get fd' into a method */
  int socket_fd;
  size_t len = sizeof(socket_fd);
  rc = zmq_getsockopt(sock, ZMQ_FD, &socket_fd, &len);
  insist(rc == 0, "zmq_getsockopt(ZMQ_FD) expected to return 0, but got %d", rc);

  /* built-in methods */
  rpc_service_register(service, "list_methods", rpc_m_list_methods, service);
  rpc_service_register(service, "echo", rpc_m_echo, NULL);

  service->socket = sock;
  service->ev = ev;
  ev_io_init(&service->io, rpc_service_poll, socket_fd, EV_READ);
  ev_io_start(service->ev, &service->io);
  printf("RPC/API started\n");
} /* rpc_service_start */
/* R binding: send the raw vector R_rmsg on the ZeroMQ socket held in the
 * external pointer R_socket, using the given flags. Errors are reported via
 * REprintf; always returns R_NilValue.
 *
 * Fix: when zmq_msg_init_size failed, the code previously continued into
 * memcpy/zmq_msg_send/zmq_msg_close on an uninitialized message. */
SEXP R_zmq_msg_send(SEXP R_rmsg, SEXP R_socket, SEXP R_flags){
	int C_rmsg_length = LENGTH(R_rmsg);
	int C_ret = -1, C_errno, C_flags = INTEGER(R_flags)[0];
	void *C_socket = R_ExternalPtrAddr(R_socket);
	zmq_msg_t msg;

	if(C_socket != NULL){
		C_ret = zmq_msg_init_size(&msg, C_rmsg_length);
		if(C_ret == -1){
			C_errno = zmq_errno();
			REprintf("R_zmq_msg_init_size errno: %d strerror: %s\n",
				C_errno, zmq_strerror(C_errno));
			/* msg is not valid; do not touch it further */
			return(R_NilValue);
		}
		memcpy(zmq_msg_data(&msg), RAW(R_rmsg), C_rmsg_length);
		C_ret = zmq_msg_send(&msg, C_socket, C_flags);
		if(C_ret == -1){
			C_errno = zmq_errno();
			REprintf("R_zmq_msg_send errno: %d strerror: %s\n",
				C_errno, zmq_strerror(C_errno));
		}
		C_ret = zmq_msg_close(&msg);
		if(C_ret == -1){
			C_errno = zmq_errno();
			REprintf("R_zmq_msg_close errno: %d strerror: %s\n",
				C_errno, zmq_strerror(C_errno));
		}
	} else{
		REprintf("R_zmq_send: C_socket is not available.\n");
	}
	return(R_NilValue);
} /* End of R_zmq_msg_send(). */
/**
 * Non-blocking receive of one (possibly multipart) message into s.
 *
 * @param s receives the concatenated message parts (cleared first)
 * @return 1 when no message is available (EAGAIN), 0 on success,
 *         -1 on any other ZeroMQ error.
 *
 * Fixes: removed the unused local QString; error reporting now uses
 * zmq_errno() consistently instead of mixing it with raw errno.
 */
int CZmqThread::block_rcv_socket(QString & s){
    int rc;
    int64_t more;
    size_t more_size = sizeof (more);
    s = "";
    do {
        rc = zmq_recvmsg (m_socket, &m_msg, ZMQ_DONTWAIT);
        if (rc < 0) {
            if ( zmq_errno () == EAGAIN ){
                return (1);   /* nothing to read right now */
            }
            printf ("error in zmq_rcv : %s\n", zmq_strerror (zmq_errno ()));
            return(-1);
        }else{
            /* append this part to the caller's string */
            s += (QString::fromLocal8Bit((char *)zmq_msg_data (&m_msg),
                                         (int)zmq_msg_size (&m_msg) ) );
            /* ZMQ_RCVMORE tells us whether more parts follow */
            rc = zmq_getsockopt (m_socket, ZMQ_RCVMORE, &more, &more_size);
            if (rc != 0) {
                printf ("error in zmq_getsockopt : %s\n", zmq_strerror (zmq_errno ()));
                return(-1);
            }
        }
    } while ( more );
    return (0);
}
/* Create (once) the shared czmq context, open the socket for this action
 * instance, apply any non-default socket options, then bind or connect
 * according to pData->action.
 *
 * Fix: pData->rcvBuf was applied with zsocket_set_sndbuf (copy-paste bug);
 * it now sets the receive buffer. Also added the missing newline on the
 * connect-success debug message. */
static rsRetVal initZMQ(instanceData* pData) {
    DEFiRet;

    /* create the context if necessary. */
    if (NULL == s_context) {
        zsys_handler_set(NULL);
        s_context = zctx_new();
        if (s_workerThreads > 0) zctx_set_iothreads(s_context, s_workerThreads);
    }

    pData->socket = zsocket_new(s_context, pData->type);
    if (NULL == pData->socket) {
        errmsg.LogError(0, RS_RET_NO_ERRCODE,
                        "omzmq3: zsocket_new failed for %s: %s",
                        pData->description, zmq_strerror(errno));
        ABORT_FINALIZE(RS_RET_NO_ERRCODE);
    }

    /* use czmq defaults for these, unless set to non-default values */
    if(pData->identity)             zsocket_set_identity(pData->socket, (char*)pData->identity);
    if(pData->sndBuf > -1)          zsocket_set_sndbuf(pData->socket, pData->sndBuf);
    if(pData->rcvBuf > -1)          zsocket_set_rcvbuf(pData->socket, pData->rcvBuf); /* was sndbuf */
    if(pData->linger > -1)          zsocket_set_linger(pData->socket, pData->linger);
    if(pData->backlog > -1)         zsocket_set_backlog(pData->socket, pData->backlog);
    if(pData->sndTimeout > -1)      zsocket_set_sndtimeo(pData->socket, pData->sndTimeout);
    if(pData->rcvTimeout > -1)      zsocket_set_rcvtimeo(pData->socket, pData->rcvTimeout);
    if(pData->maxMsgSize > -1)      zsocket_set_maxmsgsize(pData->socket, pData->maxMsgSize);
    if(pData->rate > -1)            zsocket_set_rate(pData->socket, pData->rate);
    if(pData->recoveryIVL > -1)     zsocket_set_recovery_ivl(pData->socket, pData->recoveryIVL);
    if(pData->multicastHops > -1)   zsocket_set_multicast_hops(pData->socket, pData->multicastHops);
    if(pData->reconnectIVL > -1)    zsocket_set_reconnect_ivl(pData->socket, pData->reconnectIVL);
    if(pData->reconnectIVLMax > -1) zsocket_set_reconnect_ivl_max(pData->socket, pData->reconnectIVLMax);
    if(pData->ipv4Only > -1)        zsocket_set_ipv4only(pData->socket, pData->ipv4Only);
    if(pData->affinity != 1)        zsocket_set_affinity(pData->socket, pData->affinity);
    if(pData->rcvHWM > -1)          zsocket_set_rcvhwm(pData->socket, pData->rcvHWM);
    if(pData->sndHWM > -1)          zsocket_set_sndhwm(pData->socket, pData->sndHWM);

    /* bind or connect to it */
    if (pData->action == ACTION_BIND) {
        /* bind asserts, so no need to test return val here which isn't
           the greatest api -- oh well */
        if(-1 == zsocket_bind(pData->socket, (char*)pData->description)) {
            errmsg.LogError(0, RS_RET_NO_ERRCODE,
                            "omzmq3: bind failed for %s: %s",
                            pData->description, zmq_strerror(errno));
            ABORT_FINALIZE(RS_RET_NO_ERRCODE);
        }
        DBGPRINTF("omzmq3: bind to %s successful\n",pData->description);
    } else {
        if(-1 == zsocket_connect(pData->socket, (char*)pData->description)) {
            errmsg.LogError(0, RS_RET_NO_ERRCODE,
                            "omzmq3: connect failed for %s: %s",
                            pData->description, zmq_strerror(errno));
            ABORT_FINALIZE(RS_RET_NO_ERRCODE);
        }
        DBGPRINTF("omzmq3: connect to %s successful\n", pData->description);
    }
finalize_it:
    RETiRet;
}
/** * Send a message, but leave the reply on the socket * @param uuid * A unique identifier for this send * @param command * The string that will be sent * @return * If the send was successful */ bool BoomStick::SendAsync(const std::string& uuid, const std::string& command) { if (0 == mUtilizedThread) { mUtilizedThread = pthread_self(); } else { CHECK(pthread_self() == mUtilizedThread); } if (nullptr == mCtx || nullptr == mChamber) { return false; } bool success = true; if (FindPendingUuid(uuid)) { return true; } zmsg_t* msg = zmsg_new(); if (zmsg_addmem(msg, uuid.c_str(), uuid.size()) < 0) { success = false; LOG(WARNING) << "queue error " << zmq_strerror(zmq_errno()); } else if (zmsg_addmem(msg, command.c_str(), command.size()) < 0) { success = false; LOG(WARNING) << "queue error " << zmq_strerror(zmq_errno()); } else { zmq_pollitem_t items[1]; items[0].socket = mChamber; items[0].events = ZMQ_POLLOUT; int rc = zmq_poll(items, 1, 0); if (0 == rc) { zmq_poll(items,1,100); } if (rc < 0) { success = false; LOG(WARNING) << "Queue error, cannot poll for status"; } else if (1 == rc) { if ((items[0].revents & ZMQ_POLLOUT) != ZMQ_POLLOUT) { LOG(WARNING) << "Queue error, cannot send messages the queue is full"; std::this_thread::sleep_for(std::chrono::milliseconds(100)); success = false; } else if (zmsg_send(&msg, mChamber) == 0) { success = true; mPendingReplies[uuid] = std::time(NULL); } else { LOG(WARNING) << "queue error " << zmq_strerror(zmq_errno()); success = false; } } else { LOG(WARNING) << "Queue error, timeout waiting for queue to be ready"; success = false; } } if (msg) { zmsg_destroy(&msg); } return success; }
int main(){ zmq::context_t context(1); zmq::socket_t client (context, ZMQ_PUSH); client.setsockopt( ZMQ_IDENTITY, "B", 1); client.connect("tcp://127.0.0.1:5560"); std::cout << "connected!" << std::endl; sleep(1); void *watch; unsigned long elapsed; unsigned long throughput; int message_count = 1; watch = zmq_stopwatch_start (); /*for( int i = 0; i < message_count; i++){ //s_sendmore (client, "A"); //s_sendmore(client, ""); s_send (client, "This is the workload"); //s_dump(client); //std::string string = s_recv (client); //zmq::message_t message; //client.recv(&message); }*/ for (i = 0; i != message_count; i++) { rc = zmq_msg_init_size (&msg, message_size); if (rc != 0) { printf ("error in zmq_msg_init_size: %s\n", zmq_strerror (errno)); return -1; } #if defined ZMQ_MAKE_VALGRIND_HAPPY memset (zmq_msg_data (&msg), 0, message_size); #endif rc = zmq_sendmsg (s, &msg, 0); if (rc < 0) { printf ("error in zmq_sendmsg: %s\n", zmq_strerror (errno)); return -1; } rc = zmq_msg_close (&msg); if (rc != 0) { printf ("error in zmq_msg_close: %s\n", zmq_strerror (errno)); return -1; } } elapsed = zmq_stopwatch_stop (watch); throughput = (unsigned long) ((double) message_count / (double) elapsed * 1000000); printf ("mean throughput: %d [msg/s]\n", (int) throughput); sleep(1); return 0; }
/**
 * @internal
 * Setup ZeroMq for receiving notifications.
 *
 * Lazily creates the context, the SUB socket (subscribed to "Commit"
 * messages) connected to data->endpoint, and the I/O adapter. Each step is
 * skipped when already done, so the function is safe to call repeatedly.
 *
 * Fix: when zmq_socket() failed, zmq_close() was called on the NULL socket,
 * which is invalid; the close is removed from that branch.
 *
 * @param data plugin data containing context, socket, etc.
 */
void elektraZeroMqRecvSetup (ElektraZeroMqRecvPluginData * data)
{
	// create zmq context
	if (!data->zmqContext)
	{
		data->zmqContext = zmq_ctx_new ();
		if (data->zmqContext == NULL)
		{
			ELEKTRA_LOG_WARNING ("zmq_ctx_new failed %s", zmq_strerror (zmq_errno ()));
			return;
		}
	}

	// create subscriber socket
	if (!data->zmqSubscriber)
	{
		data->zmqSubscriber = zmq_socket (data->zmqContext, ZMQ_SUB);
		if (data->zmqSubscriber == NULL)
		{
			ELEKTRA_LOG_WARNING ("zmq_socket failed %s", zmq_strerror (zmq_errno ()));
			return;
		}

		// subscribe to notifications
		char * keyCommitType = "Commit";
		if (zmq_setsockopt (data->zmqSubscriber, ZMQ_SUBSCRIBE, keyCommitType, elektraStrLen (keyCommitType)) != 0)
		{
			ELEKTRA_LOG_WARNING ("failed to subscribe to %s messages", keyCommitType);
		}

		// connect to endpoint
		int result = zmq_connect (data->zmqSubscriber, data->endpoint);
		if (result != 0)
		{
			ELEKTRA_LOG_WARNING ("zmq_connect error: %s\n", zmq_strerror (zmq_errno ()));
			zmq_close (data->zmqSubscriber);
			data->zmqSubscriber = NULL;
			return;
		}
	}

	if (!data->zmqAdapter)
	{
		// attach ZeroMq adater and wait for socket to be writable
		data->zmqAdapter = elektraIoAdapterZeroMqAttach (data->zmqSubscriber, data->ioBinding,
								 ELEKTRA_IO_ADAPTER_ZEROMQCB_READ, zeroMqRecvSocketReadable, data);
		if (!data->zmqAdapter)
		{
			ELEKTRA_LOG_WARNING ("could not attach zmq adapter");
			zmq_close (data->zmqSubscriber);
			data->zmqSubscriber = NULL;
			return;
		}
	}
}
void Chat::run() { char buff[1024] = { 0 }; zmq_pollitem_t items[3] = { 0 }; items[0].socket = subscriber_; items[0].events = ZMQ_POLLIN; items[1].socket = gate_; items[1].events = ZMQ_POLLIN; items[2].fd = STDIN_FILENO; items[2].events = ZMQ_POLLIN; while(true) { int len; int ret = zmq_poll(items, 3, -1); if (items[0].revents & ZMQ_POLLIN) { len = zmq_recv(items[0].socket, buff, sizeof(buff), 0); if (len > 0) { buff[len] = 0; std::cout << buff << std::endl; } else { std::cerr << zmq_strerror(errno); } } if (items[1].revents & ZMQ_POLLIN) { len = zmq_recv(items[1].socket, buff, sizeof(buff), 0); if (len > 0) { handle_gate((Action*)buff); } else { std::cerr << zmq_strerror(errno); } } if (items[2].revents & ZMQ_POLLIN) { std::cin.getline(buff, sizeof(buff)); zmq_send(publisher_, buff, std::cin.gcount(), 0); } } }
/* Reads a two-part ZeroMQ message (change type, then changed key name),
 * NUL-terminates both payloads into freshly allocated strings, and invokes
 * the registered notification callback with a Key built from the key name.
 * Both receives use ZMQ_DONTWAIT since the adapter already signalled
 * readability; any failure logs a warning and cleans up partial state.
 *
 * NOTE(review): elektraStrNDup(zmq_msg_data(&message), length + 1) copies
 * length+1 bytes out of a message whose payload is only `length` bytes —
 * this looks like a one-byte over-read (the extra byte is then overwritten
 * by the explicit '\0'); confirm elektraStrNDup semantics and consider
 * duplicating `length` bytes into a length+1 allocation instead.
 *
 * NOTE(review): the same zmq_msg_t is reused for the second zmq_msg_recv
 * without an intervening zmq_msg_close — presumably zmq_msg_recv reinits
 * the message; verify against the libzmq contract. */
/** * @internal * Called whenever the socket becomes readable. * ZeroMq since sends multipart messages atomically (all or nothing) * both message parts are instantly available. * * @param socket ZeroMq socket * @param context context passed to elektraIoAdapterZeroMqAttach() */ static void zeroMqRecvSocketReadable (void * socket, void * context) { ElektraZeroMqRecvPluginData * data = context; char * changeType; char * changedKeyName; zmq_msg_t message; zmq_msg_init (&message); int result = zmq_msg_recv (&message, socket, ZMQ_DONTWAIT); if (result == -1) { ELEKTRA_LOG_WARNING ("receiving change type failed: %s; aborting", zmq_strerror (zmq_errno ())); zmq_msg_close (&message); return; } if (!zmq_msg_more (&message)) { ELEKTRA_LOG_WARNING ("message has only one part; aborting"); zmq_msg_close (&message); return; } int length = zmq_msg_size (&message); changeType = elektraStrNDup (zmq_msg_data (&message), length + 1); changeType[length] = '\0'; ELEKTRA_LOG_DEBUG ("received change type %s", changeType); result = zmq_msg_recv (&message, socket, ZMQ_DONTWAIT); if (result == -1) { ELEKTRA_LOG_WARNING ("receiving key name failed: %s; aborting", zmq_strerror (zmq_errno ())); elektraFree (changeType); zmq_msg_close (&message); return; } length = zmq_msg_size (&message); changedKeyName = elektraStrNDup (zmq_msg_data (&message), length + 1); changedKeyName[length] = '\0'; ELEKTRA_LOG_DEBUG ("received key name %s", changedKeyName); // notify about changes Key * changedKey = keyNew (changedKeyName, KEY_END); data->notificationCallback (changedKey, data->notificationContext); zmq_msg_close (&message); elektraFree (changeType); elektraFree (changedKeyName); }
int main (int argc, char **argv) { int i; char *endpoint = NULL; void *context, *socket; if (argc <= 2) { _usage(argv[0]); return -1; } endpoint = argv[1]; context = zmq_ctx_new(); if (!context) { _ERR("ZeroMQ context: %s\n", zmq_strerror(errno)); return -1; } socket = zmq_socket(context, ZMQ_PUSH); if (!socket) { _ERR("ZeroMQ socket: %s\n", zmq_strerror(errno)); zmq_ctx_destroy(context); return -1; } if (zmq_connect(socket, endpoint) == -1) { _ERR("ZeroMQ connect: %s: %s\n", endpoint, zmq_strerror(errno)); zmq_close(socket); zmq_ctx_destroy(context); return -1; } for (i = 2; i != (argc - 1); i++) { if (zmq_send(socket, argv[i], strlen(argv[i]), ZMQ_SNDMORE) == -1) { _ERR("ZeroMQ send: %s\n", zmq_strerror(errno)); } } if (zmq_send(socket, argv[i], strlen(argv[i]), 0) == -1) { _ERR("ZeroMQ send: %s\n", zmq_strerror(errno)); } zmq_close(socket); zmq_ctx_destroy(context); return 0; }
/* Initialize the proxy: ROUTER front-end bound to rp->address, DEALER
 * back-end bound to inproc://workers, rp->workers worker threads, then a
 * blocking zmq_proxy between the two sockets.
 *
 * Fixes: error paths now close any sockets already opened and terminate the
 * context instead of leaking them (sockets are closed before zmq_term so it
 * cannot block on them); pthread_create failures are now reported. */
int raptor_proxy_init(RaptorProxy* rp) {
    rp->zctx = zmq_init(1);
    if (!rp->zctx) {
        dzlog_error("zmq_init failed, err: %s", zmq_strerror(errno));
        return -1;
    }

    rp->client_socket = zmq_socket(rp->zctx, ZMQ_ROUTER);
    if (!rp->client_socket) {
        dzlog_error("create client socket failed, err: %s", zmq_strerror(errno));
        goto fail_ctx;
    }
    if (zmq_bind(rp->client_socket, rp->address) != 0) {
        dzlog_error("client socket bind failed, err: %s", zmq_strerror(errno));
        goto fail_client;
    }

    rp->worker_socket = zmq_socket(rp->zctx, ZMQ_DEALER);
    if (!rp->worker_socket) {
        dzlog_error("create worker socket failed, err: %s", zmq_strerror(errno));
        goto fail_client;
    }
    if (zmq_bind(rp->worker_socket, "inproc://workers") != 0) {
        dzlog_error("worker socket bind failed, err: %s", zmq_strerror(errno));
        goto fail_worker;
    }

    // launch pool of worker threads
    int count;
    for (count = 0; count < rp->workers; count++) {
        pthread_t worker;
        if (pthread_create(&worker, NULL, worker_routine, rp->zctx) != 0) {
            dzlog_error("pthread_create for worker %d failed", count);
        }
    }

    // connect work threads to client threads via a queue proxy (blocks)
    zmq_proxy (rp->client_socket, rp->worker_socket, NULL);
    return 0;

fail_worker:
    zmq_close(rp->worker_socket);
    rp->worker_socket = NULL;
fail_client:
    zmq_close(rp->client_socket);
    rp->client_socket = NULL;
fail_ctx:
    zmq_term(rp->zctx);
    rp->zctx = NULL;
    return -1;
}
/* Connect `sock` to `endpoint`; on failure print the ZeroMQ error and
 * terminate the process. */
void _zmq_connect (void *sock, const char *endpoint)
{
    int rc = zmq_connect (sock, endpoint);
    if (rc < 0) {
        fprintf (stderr, "zmq_connect %s: %s\n", endpoint, zmq_strerror(errno));
        exit (1);
    }
}
/* Terminate the ZeroMQ context `ctx`; on failure print the error and
 * terminate the process. */
void _zmq_term (void *ctx)
{
    int rc = zmq_term (ctx);
    if (rc < 0) {
        fprintf (stderr, "zmq_term: %s\n", zmq_strerror (errno));
        exit (1);
    }
}
/* Close the ZeroMQ socket; on failure print the error and terminate the
 * process. */
void _zmq_close (void *socket)
{
    int rc = zmq_close (socket);
    if (rc < 0) {
        fprintf (stderr, "zmq_close: %s\n", zmq_strerror (errno));
        exit (1);
    }
}
/* Receive a message into `msg` with the given flags; on failure print the
 * error and terminate the process. */
void _zmq_recv (void *socket, zmq_msg_t *msg, int flags)
{
    int rc = zmq_recv (socket, msg, flags);
    if (rc < 0) {
        fprintf (stderr, "zmq_recv: %s\n", zmq_strerror (errno));
        exit (1);
    }
}
/* Release the message `msg`; on failure print the error and terminate the
 * process. */
void _zmq_msg_close (zmq_msg_t *msg)
{
    int rc = zmq_msg_close (msg);
    if (rc < 0) {
        fprintf (stderr, "zmq_msg_close: %s\n", zmq_strerror (errno));
        exit (1);
    }
}
/* Initialize `msg` with a payload of `size` bytes; on failure print the
 * error and terminate the process. */
void _zmq_msg_init_size (zmq_msg_t *msg, size_t size)
{
    int rc = zmq_msg_init_size (msg, size);
    if (rc < 0) {
        fprintf (stderr, "zmq_msg_init_size: %s\n", zmq_strerror (errno));
        exit (1);
    }
}
/* Format fmt/ap, append the ZeroMQ error string for errnum, and emit the
 * result to the configured destination (log file or syslog). Falls back to
 * a fixed-size stack buffer when vasprintf cannot allocate.
 *
 * Fix: vasprintf consumes `ap`, and the fallback path reused the same
 * va_list with vsnprintf — undefined behavior. A va_copy is taken for the
 * fallback. */
static void _verr (int errnum, const char *fmt, va_list ap)
{
    char *msg;
    char buf[128];
    const char *s = zmq_strerror (errnum);
    va_list ap2;

    va_copy (ap2, ap);   /* keep a usable copy for the fallback */
    if (vasprintf (&msg, fmt, ap) < 0) {
        (void)vsnprintf (buf, sizeof (buf), fmt, ap2);
        msg = buf;
    }
    va_end (ap2);

    switch (dest) {
        case DEST_LOGF:
            if (!logf)
                logf = stderr;   /* default destination */
            fprintf (logf, "%s: %s: %s\n", prog, msg, s);
            fflush (logf);
            break;
        case DEST_SYSLOG:
            syslog (syslog_level, "%s: %s", msg, s);
            break;
    }
    if (msg != buf)
        free (msg);   /* only free the heap message, not the stack fallback */
}
/* Heartbeat::init: connects the ZeroMQ PAIR gate socket to MAGIC_GATE, then
 * sets up a broadcast-capable UDP socket (broadcast + reuse-address +
 * multicast loopback options) bound to the broadcast endpoint on
 * HEARTBEAT_PORT, schedules the first receive on the strand, and sends the
 * initial heartbeat.
 *
 * NOTE(review): in the zmq_connect failure branch the return value of
 * zmq_strerror(errno) is discarded — the connect error is silently
 * ignored; confirm whether this should be logged or treated as fatal.
 *
 * NOTE(review): presumably the udp_socket_ options must be set before
 * bind() for reuse_address to take effect — the current order does that;
 * keep it when touching this code. */
void Heartbeat::init(std::string id) { void* zmq_ctx = g_zmq_ctx.get_zmq_ctx(); gate_ = zmq_socket(zmq_ctx, ZMQ_PAIR); if (zmq_connect(gate_, MAGIC_GATE)) { zmq_strerror(errno); } myid_ = id; ios_ptr_ = &g_proactor.get_ios(); udp_socket_.reset(new boost::asio::ip::udp::socket(*ios_ptr_)); strand_.reset(new boost::asio::strand(*ios_ptr_)); strand4peermap_.reset(new boost::asio::strand(*ios_ptr_)); heartbeat_timer_.reset(new boost::asio::deadline_timer(*ios_ptr_, boost::posix_time::milliseconds(HEARTBEAT_INTERVAL))); broadcast_addr_ = boost::asio::ip::address_v4::broadcast(); broadcast_ep_ = boost::asio::ip::udp::endpoint(broadcast_addr_, HEARTBEAT_PORT); udp_socket_->open(boost::asio::ip::udp::v4()); boost::asio::ip::udp::socket::broadcast broadcast_option(true); boost::asio::ip::udp::socket::reuse_address reuse_option(true); boost::asio::ip::multicast::enable_loopback loopback_option(true); udp_socket_->set_option(reuse_option); udp_socket_->set_option(broadcast_option); udp_socket_->set_option(loopback_option); udp_socket_->bind(broadcast_ep_); strand_->post(boost::bind(&Heartbeat::start_receive, this)); send_heartbeat(); }
void* tun_thread_dt(void *data) { tun_thread_data *self = (tun_thread_data*)data; void *socket = zmq_socket(self->d.context, ZMQ_PUSH); int rc = zmq_bind(socket, "inproc://#tun_to_#irc"); char *sbuffer = malloc(sizeof(char)*MTU); if (!sbuffer) goto exit; if (rc) { tun_debug(self, "error when creating IPC socket - %s", zmq_strerror(errno)); goto exit; } // tell_to_other_threads_the_tun2irc_socket_is_binded tun_debug(self, "[data] created tun (data) thread!"); while (1) { int nbytes = ltun_read(self->tun, sbuffer, MTU); if (nbytes > 0) { tun_debug(self, "got %d from tun", nbytes); } if (zmq_send(socket, sbuffer, nbytes, 0) < 0) { tun_debug(self, "error when trying to send a message to the irc thread (warning, we continue here!)", zmq_strerror(errno)); } } exit: if (socket) zmq_close(socket); pthread_exit(NULL); }
void* tun_thread_zmq(void *data) { tun_thread_data *self = (tun_thread_data*)data; char *sbuffer = malloc(sizeof(char)*MTU); void *socket = NULL; if (!sbuffer) goto exit; socket = zmq_socket(self->d.context, ZMQ_PULL); // client of the tun_socket int ret = zmq_bind(socket, "inproc://#irc_to_#tun"); if (ret) { tun_debug(self, "(tun_thread_zmq) error when connecting to IPC socket - %s", zmq_strerror(errno)); goto exit; } while (1) { tun_debug(self, ">> zmq_recv <<"); memset(sbuffer, 0, MTU); int nbytes = zmq_recv(socket, sbuffer, MTU, 0); tun_debug(self, "got %d bytes", nbytes); if (nbytes < 0 ) { tun_debug(self, "error when reading from zeromq socket"); goto exit; // a cute break here! } else if (nbytes == 0) { continue; } else if (nbytes > MTU) { tun_debug(self, "warning: some message got truncated by %d (%d - %d), this means the MTU is too low for you!", nbytes - MTU, nbytes, MTU); } ltun_write(self->tun, sbuffer, nbytes); } exit: if (socket) zmq_close(socket); pthread_exit(NULL); }
int lcapd_process_request(void *hint, const struct lcapnet_request *req) { struct lcap_ctx *ctx = (struct lcap_ctx *)hint; struct px_rpc_hdr *hdr = req->lr_body; size_t msg_len = req->lr_body_len; int rc = 0; if (msg_len < sizeof(*hdr)) { rc = -EPROTO; lcap_error("Received truncated/invalid RPC of size: %zu", msg_len); goto out_reply; } if (hdr->op_type < RPC_OP_FIRST || hdr->op_type > RPC_OP_LAST) { rc = -EINVAL; lcap_error("Received RPC with invalid opcode: %d\n", hdr->op_type); goto out_reply; } rc = rpc_handle_one(ctx, hdr->op_type, req); out_reply: lcap_verb("Received %s RPC [rc=%d | %s]", rpc_optype2str(hdr->op_type), rc, zmq_strerror(-rc)); if (rc < 0) rc = ack_retcode(ctx->cc_sock, NULL, req->lr_remote, rc); return rc; }