static void * worker_thread (void *arg) {
    zmq::context_t *context = (zmq::context_t *) arg;
    zmq::socket_t worker (*context, ZMQ_REQ);

    // We use a string identity for ease here
    s_set_id (worker);
    worker.connect ("ipc://routing.ipc");

    int total = 0;
    while (1) {
        // Tell the router we're ready for work
        s_send (worker, "ready");

        // Get workload from router, until finished
        std::string workload = s_recv (worker);
        int finished = (workload.compare ("END") == 0);
        if (finished) {
            std::cout << "Processed: " << total << " tasks" << std::endl;
            break;
        }
        total++;

        // Do some random work
        s_sleep (within (100) + 1);
    }
    return (NULL);
}
static void * worker_task (void *args) {
    void *context = zmq_ctx_new ();
    void *worker = zmq_socket (context, ZMQ_REQ);
    s_set_id (worker);          // Set a printable identity
    zmq_connect (worker, "ipc://backend.ipc");

    // Tell broker we're ready for work
    s_send (worker, "READY");

    while (1) {
        // Read and save all frames until we get an empty frame
        // In this example there is only 1 but it could be more
        char *identity = s_recv (worker);
        char *empty = s_recv (worker);
        assert (*empty == 0);
        free (empty);

        // Get request, send reply
        char *request = s_recv (worker);
        printf ("Worker: %s\n", request);
        free (request);

        s_sendmore (worker, identity);
        s_sendmore (worker, "");
        s_send (worker, "OK");
        free (identity);
    }
    zmq_close (worker);
    zmq_ctx_destroy (context);
    return NULL;
}
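All of the C snippets here call a zhelpers-style s_set_id() helper that is not shown in these listings. For reference only, a minimal sketch of such a helper (assuming the usual printable "XXXX-XXXX" identity format; the Windows variants further down take an explicit id argument instead) could look like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zmq.h>

// Sketch: stamp a short, printable identity onto a socket so that
// s_send()/s_recv(), which work on C strings, can pass it around as a
// routing frame. The real zhelpers header uses its own randof() macro;
// plain rand() stands in for it here.
static void s_set_id (void *socket)
{
    char identity [10];
    sprintf (identity, "%04X-%04X",
             (unsigned) (rand () % 0x10000), (unsigned) (rand () % 0x10000));
    zmq_setsockopt (socket, ZMQ_IDENTITY, identity, strlen (identity));
}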
int main (void) {
    void *context = zmq_ctx_new ();
    void *worker = zmq_socket (context, ZMQ_DEALER);
    s_set_id (worker);          // Set a printable identity
    zmq_connect (worker, "tcp://localhost:5671");

    int total = 0;
    while (1) {
        // Tell the broker we're ready for work
        s_sendmore (worker, "");
        s_send (worker, "Hi Boss");

        // Get workload from broker, until finished
        free (s_recv (worker));     // Envelope delimiter
        char *workload = s_recv (worker);
        int finished = (strcmp (workload, "Fired!") == 0);
        free (workload);
        if (finished) {
            printf ("Completed: %d tasks\n", total);
            break;
        }
        total++;

        // Do some random work
        s_sleep (randof (500) + 1);
    }
    zmq_close (worker);
    zmq_ctx_destroy (context);
    return 0;
}
// Worker using REQ socket to do LRU routing
//
static void * worker_thread (void *args) {
    void *context = zmq_init (1);
    void *worker = zmq_socket (context, ZMQ_REQ);
    s_set_id (worker);          // Makes tracing easier
    zmq_connect (worker, "ipc://backend.ipc");

    // Tell broker we're ready for work
    s_send (worker, "READY");

    while (1) {
        // Read and save all frames until we get an empty frame
        // In this example there is only 1 but it could be more
        char *address = s_recv (worker);
        char *empty = s_recv (worker);
        assert (*empty == 0);
        free (empty);

        // Get request, send reply
        char *request = s_recv (worker);
        printf ("Worker: %s\n", request);
        free (request);

        s_sendmore (worker, address);
        s_sendmore (worker, "");
        s_send (worker, "OK");
        free (address);
    }
    zmq_close (worker);
    zmq_term (context);
    return NULL;
}
static void * worker_task(void *args) {
    void *context = zmq_init(1);
    void *worker = zmq_socket(context, ZMQ_REQ);

    // s_set_id() generates a printable string for the socket
    // and uses it as that socket's identity.
    s_set_id(worker);
    zmq_connect(worker, "ipc://routing.ipc");

    int total = 0;
    while (1) {
        // Tell the ROUTER we're ready for work
        s_send(worker, "ready");

        // Get workload from the ROUTER, until we receive the end signal
        char *workload = s_recv(worker);
        int finished = (strcmp(workload, "END") == 0);
        free(workload);
        if (finished) {
            printf("Processed: %d tasks\n", total);
            break;
        }
        total++;

        // Wait for a random amount of time
        s_sleep(randof(1000) + 1);
    }
    zmq_close(worker);
    zmq_term(context);
    return NULL;
}
// Worker using REQ socket to do LRU routing
//
static void * worker_thread (void *arg) {
    zmq::context_t context (1);
    zmq::socket_t worker (context, ZMQ_REQ);
    s_set_id (worker);          // Makes tracing easier
    worker.connect ("ipc://backend.ipc");

    // Tell backend we're ready for work
    s_send (worker, "READY");

    while (1) {
        // Read and save all frames until we get an empty frame
        // In this example there is only 1 but it could be more
        std::string address = s_recv (worker);
        {
            std::string empty = s_recv (worker);
            assert (empty.size () == 0);
        }

        // Get request, send reply
        std::string request = s_recv (worker);
        std::cout << "Worker: " << request << std::endl;

        s_sendmore (worker, address);
        s_sendmore (worker, "");
        s_send (worker, "OK");
    }
    return (NULL);
}
static void * worker_thread (void *context) {
    void *worker = zmq_socket (context, ZMQ_REQ);

    // We use a string identity for ease here
    s_set_id (worker);
    zmq_connect (worker, "ipc://routing.ipc");

    int total = 0;
    while (1) {
        // Tell the router we're ready for work
        s_send (worker, "ready");

        // Get workload from router, until finished
        char *workload = s_recv (worker);
        int finished = (strcmp (workload, "END") == 0);
        free (workload);
        if (finished) {
            printf ("Processed: %d tasks\n", total);
            break;
        }
        total++;

        // Do some random work
        struct timespec t;
        t.tv_sec = 0;
        t.tv_nsec = within (100000000) + 1;
        nanosleep (&t, NULL);
    }
    return (NULL);
}
// .split worker task
// While this example runs in a single process, that is just to make
// it easier to start and stop the example. Each thread has its own
// context and conceptually acts as a separate process.
// This is the worker task, using a REQ socket to do load-balancing.
// Because s_send and s_recv can't handle 0MQ binary identities, we
// set a printable text identity to allow routing.

static void *worker_task(void *args) {
    void *context = zmq_ctx_new();
    void *worker = zmq_socket(context, ZMQ_REQ);

#if (defined (WIN32))
    s_set_id(worker, (intptr_t)args);
    zmq_connect(worker, "tcp://localhost:5673");    // backend
#else
    s_set_id(worker);
    zmq_connect(worker, "ipc://backend.ipc");
#endif

    // Tell broker we're ready for work
    s_send(worker, "READY");

    while (1) {
        // Read and save all frames until we get an empty frame
        // In this example there is only 1, but there could be more
        char *identity = s_recv(worker);
        char *empty = s_recv(worker);
        assert(*empty == 0);
        free(empty);

        // Get request, send reply
        char *request = s_recv(worker);
        printf("Worker: %s from %s\n", request, identity);
        free(request);

        s_sendmore(worker, identity);
        s_sendmore(worker, "");
        s_send(worker, "OK");
        free(identity);
    }
    zmq_close(worker);
    zmq_ctx_destroy(context);
    return NULL;
}
// Basic request-reply client using REQ socket
//
static void * client_thread (void *context) {
    void *client = zmq_socket (context, ZMQ_REQ);
    s_set_id (client);          // Makes tracing easier
    zmq_connect (client, "ipc://frontend.ipc");

    // Send request, get reply
    s_send (client, "HELLO");
    char *reply = s_recv (client);
    printf ("Client: %s\n", reply);
    free (reply);
    return (NULL);
}
// Basic request-reply client using REQ socket
//
static void * client_thread (void *arg) {
    zmq::context_t context (1);
    zmq::socket_t client (context, ZMQ_REQ);
    s_set_id (client);          // Makes tracing easier
    client.connect ("ipc://frontend.ipc");

    // Send request, get reply
    s_send (client, "HELLO");
    std::string reply = s_recv (client);
    std::cout << "Client: " << reply << std::endl;
    return (NULL);
}
// Basic request-reply client using REQ socket
// Because s_send and s_recv can't handle 0MQ binary identities, we
// set a printable text identity to allow routing.
//
static void *client_task(void *args) {
    void *context = zmq_ctx_new();
    void *client = zmq_socket(context, ZMQ_REQ);

#if (defined (WIN32))
    s_set_id(client, (intptr_t)args);
    zmq_connect(client, "tcp://localhost:5672");    // frontend
#else
    s_set_id(client);           // Set a printable identity
    zmq_connect(client, "ipc://frontend.ipc");
#endif

    // Send request, get reply
    s_send(client, "HELLO");
    char *reply = s_recv(client);
    printf("Client: %s\n", reply);
    free(reply);
    zmq_close(client);
    zmq_ctx_destroy(context);
    return NULL;
}
// ---------------------------------------------------------------------
// Connect or reconnect to broker

void connect_to_broker () {
    if (m_client) {
        delete m_client;
    }
    m_client = new zmq::socket_t (*m_context, ZMQ_REQ);
    s_set_id (*m_client);
    int linger = 0;
    m_client->setsockopt (ZMQ_LINGER, &linger, sizeof (linger));
    m_client->connect (m_broker.c_str ());
    if (m_verbose) {
        s_console ("I: connecting to broker at %s...", m_broker.c_str ());
    }
}
static void * worker_task(void *args) {
    zmq::context_t context(1);
    zmq::socket_t worker(context, ZMQ_DEALER);

#if (defined (WIN32))
    s_set_id(worker, (intptr_t)args);
#else
    s_set_id(worker);           // Set a printable identity
#endif

    worker.connect("tcp://localhost:5671");

    int total = 0;
    while (1) {
        // Tell the broker we're ready for work
        s_sendmore(worker, "");
        s_send(worker, "Hi Boss");

        // Get workload from broker, until finished
        s_recv(worker);         // Envelope delimiter
        std::string workload = s_recv(worker);
        // .skip
        if ("Fired!" == workload) {
            std::cout << "Completed: " << total << " tasks" << std::endl;
            break;
        }
        total++;

        // Do some random work
        s_sleep(within(500) + 1);
    }
    return NULL;
}
// Basic request-reply client using REQ socket
// Since s_send and s_recv can't handle 0MQ binary identities we
// set a printable text identity to allow routing.
//
static void * client_task (void *args) {
    void *context = zmq_ctx_new ();
    void *client = zmq_socket (context, ZMQ_REQ);
    s_set_id (client);          // Set a printable identity
    zmq_connect (client, "ipc://frontend.ipc");

    // Send request, get reply
    s_send (client, "HELLO");
    char *reply = s_recv (client);
    printf ("Client: %s\n", reply);
    free (reply);
    zmq_close (client);
    zmq_ctx_destroy (context);
    return NULL;
}
// Worker using REQ socket to do LRU routing
//
static void * worker_thread (void *context) {
    void *worker = zmq_socket (context, ZMQ_REQ);
    s_set_id (worker);          // Makes tracing easier
    zmq_connect (worker, "ipc://backend.ipc");

    // Tell broker we're ready for work
    s_send (worker, "READY");

    while (1) {
        zmsg_t *zmsg = zmsg_recv (worker);
        printf ("Worker: %s\n", zmsg_body (zmsg));
        zmsg_body_set (zmsg, "OK");
        zmsg_send (&zmsg, worker);
    }
    return (NULL);
}
static zmq::socket_t * s_worker_socket (zmq::context_t &context) {
    zmq::socket_t *worker = new zmq::socket_t (context, ZMQ_DEALER);

    // Set random identity to make tracing easier
    std::string identity = s_set_id (*worker);
    worker->connect ("tcp://localhost:5556");

    // Configure socket to not wait at close time
    int linger = 0;
    worker->setsockopt (ZMQ_LINGER, &linger, sizeof (linger));

    // Tell queue we're ready for work
    std::cout << "I: (" << identity << ") worker ready" << std::endl;
    s_send (*worker, "READY");

    return worker;
}
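The C++ snippets that keep the identity string returned by s_set_id() (this one and Worker::run further down) assume an overload that returns the generated value. A rough, non-authoritative sketch of such a helper, again assuming the "XXXX-XXXX" printable format, might be:

#include <cstdlib>
#include <iomanip>
#include <sstream>
#include <string>
#include <zmq.hpp>

// Sketch of the C++ variant: build a printable "XXXX-XXXX" identity,
// set it on the socket, and hand it back so the caller can log it.
inline std::string s_set_id (zmq::socket_t &socket)
{
    std::stringstream ss;
    ss << std::hex << std::uppercase
       << std::setw (4) << std::setfill ('0') << (std::rand () % 0x10000) << "-"
       << std::setw (4) << std::setfill ('0') << (std::rand () % 0x10000);
    socket.setsockopt (ZMQ_IDENTITY, ss.str ().c_str (), ss.str ().length ());
    return ss.str ();
}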
/**
 * Creates a new zh_client_t struct
 *
 * @param {char} *host
 * @return zh_client_t *
 */
zh_client_t * zh_client_new (char *host) {
    // initialize a new zh_client_t
    zh_client_t *client = malloc(sizeof(zh_client_t));

    // set the host
    client->host = host;

    // create context
    client->context = zmq_ctx_new();

    // assert context integrity
    assert(client->context);

    // create request socket
    client->socket = zmq_socket(client->context, ZMQ_REQ);

    // assert socket integrity
    assert(client->socket);

    // set unique ID on socket
    s_set_id(client->socket);

    // return to caller
    return client;
}
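For context, a hypothetical caller of zh_client_new() might look like the sketch below; the endpoint, the "ping" payload, and the "zh_client.h" header name are made up for illustration, and zh_client_new() itself leaves connecting the socket to the caller.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "zh_client.h"   // hypothetical header declaring zh_client_t and zh_client_new()
#include "zhelpers.h"    // s_send/s_recv/s_set_id helpers used above

int main (void)
{
    char endpoint [] = "tcp://localhost:5555";      // assumed endpoint
    zh_client_t *client = zh_client_new (endpoint);

    // zh_client_new() only creates the socket; connect it ourselves
    int rc = zmq_connect (client->socket, client->host);
    assert (rc == 0);

    s_send (client->socket, "ping");                // made-up request payload
    char *reply = s_recv (client->socket);
    printf ("reply: %s\n", reply);
    free (reply);

    // teardown (no zh_client_destroy() is shown in the snippet above)
    zmq_close (client->socket);
    zmq_ctx_destroy (client->context);
    free (client);
    return 0;
}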
void connect_to_broker () {
    if (m_worker) {
        delete m_worker;
    }
    m_worker = new zmq::socket_t (*m_context, ZMQ_DEALER);
    int linger = 0;
    m_worker->setsockopt (ZMQ_LINGER, &linger, sizeof (linger));
    s_set_id (*m_worker);
    m_worker->connect (m_broker.c_str ());
    if (m_verbose)
        s_console ("I: connecting to broker at %s...", m_broker.c_str ());

    // Register service with broker
    send_to_broker ((char *) MDPW_READY, m_service, NULL);

    // If liveness hits zero, queue is considered disconnected
    m_liveness = HEARTBEAT_LIVENESS;
    m_heartbeat_at = s_clock () + m_heartbeat;
}
void* worker_task (void *args)
{
    GET_OBJECT_RET(DbMgr, iDbMgr, 0);
    LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::INFO,
                   "Connect to Db Server Ip:%s User:%s DbName:%s Port:%d",
                   strDbIp.c_str(), strDbUserName.c_str(), strDbName.c_str(), DbPort);

    DBHandle dbHandle = iDbMgr->Open(strDbIp.c_str(), strDbUserName.c_str(),
                                     strDbPassword.c_str(), strDbName.c_str(), DbPort, 0);
    if (!dbHandle) {
        LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::INFO, "Worker Thread Connect Db Error!");
        return NULL;
    }
    LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::INFO, "Worker Thread Connect Db Ok!");

    GET_OBJECT_RET(ZmqMgr, iZmqMgr, 0);

    ////////////////////////////////////////////////
    ContextHandle context = (ContextHandle) Cx_ZmqBackend::mContext;
    SocketHandle responder = iZmqMgr->GetNewSocket(context, LWDP_REP);
    s_set_id(responder);

    GET_OBJECT_RET(ConfigMgr, iConfigMgr, 0);

    // backend
    std::string strWorkThread = std::string(LW_ZMQBACKEND_WORKTHREAD_TARGET);
    XPropertys propWorkThread;
    iConfigMgr->GetModulePropEntry(LW_ZMQBACKEND_MODULE_NAME,
                                   LW_ZMQBACKEND_WORKTHREAD_TARGET_NAME, propWorkThread);
    if (!propWorkThread[0].propertyText.empty()) {
        strWorkThread = propWorkThread[0].propertyText;
    } else {
        LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::WARNING,
                       "Can't Find <ConnetTarget> In Config File, Default(%s)",
                       strWorkThread.c_str());
    }

    if (iZmqMgr->Connect(responder, strWorkThread.c_str()) != LWDP_OK) {
        LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::ERR,
                       "Connect to Backend Error(%s)", strWorkThread.c_str());
        return NULL;
    }

    /////////////////////////////////////////////
    SocketHandle ctrlClient = iZmqMgr->GetNewSocket(context, LWDP_SUB);
    s_set_id(ctrlClient);

    // ctrl_client
    std::string strCtrlClient = std::string(LW_ZMQBACKEND_CTRL_CLIENT_TARGET);
    XPropertys propCtrlClient;
    iConfigMgr->GetModulePropEntry(LW_ZMQBACKEND_MODULE_NAME,
                                   LW_ZMQBACKEND_CTRL_CLIENT_NAME, propCtrlClient);
    if (!propCtrlClient[0].propertyText.empty()) {
        strCtrlClient = propCtrlClient[0].propertyText;
    } else {
        LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::WARNING,
                       "Can't Find <CtrlConnect> In Config File, Default(%s)",
                       strCtrlClient.c_str());
    }

    if (iZmqMgr->Connect(ctrlClient, strCtrlClient.c_str()) != LWDP_OK) {
        LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::ERR,
                       "Connect to Backend Ctrl Error(%s)", strCtrlClient.c_str());
        return NULL;
    }
    iZmqMgr->Setsockopt(ctrlClient, LWDP_SUBSCRIBE, "", 0);

    // Initialize poll set
    LWDP_POLLITEM_T items [] = {
        { responder,  0, LWDP_POLLIN, 0 },
        { ctrlClient, 0, LWDP_POLLIN, 0 }
    };

    // Switch messages between sockets
    Cx_Interface<Ix_ZMessage> iZMessage;
    int more = 0;               // Multipart detection
    uint32_ more_size = sizeof (more);
    while (1) {
        more = 0;
        iZmqMgr->Poll(items, 2, -1);

        if (items [0].revents & LWDP_POLLIN) {
            while (1) {
                GET_OBJECT_RET(ZMessage, iTmpMsg, 0);

                // Wait for next request from client
                // Process all parts of the message
                iTmpMsg->InitZMessage();
                iZmqMgr->Recv(responder, iTmpMsg, 0);
                LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::DEBUG,
                               "Work Thread Recv(%d)", iTmpMsg->Size());

                iZmqMgr->Getsockopt(responder, LWDP_RCVMORE, &more, &more_size);
                if (!more) {
                    iZMessage = iTmpMsg;
                    break;      // Last message part
                }
                //Api_TaskDelay (1);
            }
            LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::NOTICE,
                           "ZMQ Server Received request: [%d]", iZMessage->Size());

            // Do some 'work'
            GET_OBJECT_RET(ZmqBackend, iZmqBackend, 0);
            Data_Ptr sendData;
            sendData.reset();
            uint32_ sendLen = 0;
            LWRESULT res = iZmqBackend->CallBackZmqMsg(dbHandle, (uint8_*) iZMessage->Data(),
                                                       iZMessage->Size(), sendData, sendLen);
            if (res != LWDP_OK) {
                LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::ERR,
                               "CallBackZmqMsg ret Error(%x)", res);
                //continue;
            }
            if (sendLen < 0) {
                LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::ERR,
                               "Zmq Send Data Length is too Small(%d)", sendLen);
                //continue;
            }

            // Send reply back to client
            GET_OBJECT_RET(ZMessage, iZSMessage, 0);
            iZSMessage->InitZMessage();
            iZSMessage->SetValue(sendData.get(), sendLen);
            iZmqMgr->Send(responder, iZSMessage, 0);
            //Api_TaskDelay(1);
        }

        if (items [1].revents & LWDP_POLLIN) {
            while (1) {
                GET_OBJECT_RET(ZMessage, iTmpMsg, 0);

                // Wait for next request from client
                // Process all parts of the message
                iTmpMsg->InitZMessage();
                iZmqMgr->Recv(ctrlClient, iTmpMsg, 0);
                //LWDP_LOG_PRINT("ZMQBACKEND", LWDP_LOG_MGR::DEBUG,
                //               "Work Thread Recv(%d)", iTmpMsg->Size());

                iZmqMgr->Getsockopt(ctrlClient, LWDP_RCVMORE, &more, &more_size);
                if (!more) {
                    iZMessage = iTmpMsg;
                    break;      // Last message part
                }
                //Api_TaskDelay (1);
            }

            // Do some 'work'
            GET_OBJECT_RET(ZmqBackend, iZmqBackend, 0);
            iZmqBackend->CallBackCtrl((const char_ *) iZMessage->Data(), iZMessage->Size());
        }
    }

    iZmqMgr->CloseSocket(responder);
    iZmqMgr->CloseContext(context);
    return 0;
}
void Worker::run() {
    LOG(INFO) << "starting worker";
    zmq::context_t context(1);
    zmq::socket_t socket(context, ZMQ_DEALER);
    std::string id = s_set_id(socket);
    socket.setsockopt(ZMQ_IDENTITY, id.c_str(), id.length());
    socket.connect("tcp://localhost:5671");

    /**
     * send registration
     */
    AnyRequest rsp;
    rsp.set_type(AnyRequest_Type_REGISTRATION);
    Registration *r = new Registration;
    r->set_id(id);
    rsp.set_allocated_registration(r);
    std::string responseStr = rsp.SerializeAsString();
    s_sendmore(socket, "");
    s_send(socket, responseStr);
    LOG(INFO) << "sending registration as " << id;

    zmq::pollitem_t items [] = { { socket, 0, ZMQ_POLLIN, 0 } };
    while (1) {
        zmq::poll(&items[0], 1, kZmqPollIntervalMillisecs_);
        if (items[0].revents & ZMQ_POLLIN) {
            /**
             * Receive requests
             */
            s_recv(socket);     // Envelope delimiter
            std::string request = s_recv(socket);
            AnyRequest req;
            req.ParseFromString(request);
            AnyRequest_Type type = req.type();

            if (type == AnyRequest_Type_FETCH_SPLIT_REQUEST) {
                base::Block<SplitInfo>* block;
                if (queues_.requestQueue.tryGetWriteSlot(&block)) {
                    SplitInfo f;
                    f.filename = req.fetchsplitrequest().filename();
                    f.start = req.fetchsplitrequest().start();
                    f.end = req.fetchsplitrequest().end();
                    f.schema = req.fetchsplitrequest().schema();
                    f.objectType = req.fetchsplitrequest().objecttype();
                    f.id = req.fetchsplitrequest().tag();
                    block->data = f;
                    queues_.requestQueue.slotWritten(block);
                } else {
                    LOG(FATAL) << "dropping req because queue is full";
                }
            } else if (type == AnyRequest_Type_HEARTBEAT_REQUEST) {
                // this can be done quickly so we send the response immediately
                AnyRequest rsp;
                rsp.set_type(AnyRequest_Type_HEARTBEAT_RESPONSE);
                HeartBeatResponse *r = new HeartBeatResponse;
                rsp.set_allocated_heartbeatresponse(r);
                std::string responseStr = rsp.SerializeAsString();
                s_sendmore(socket, "");
                s_send(socket, responseStr);
                LOG(INFO) << "sending heartBeatResponse";
            } else if (type == AnyRequest_Type_SHUTDOWN_REQUEST) {
                LOG(INFO) << "worker exiting ...";
                return;
            } else {
                LOG(ERROR) << "received unknown request";
            }
        }

        /**
         * Send responses if any
         */
        base::Block<SplitInfo> *response;
        if (queues_.responseQueue.tryGetReadSlot(&response)) {
            // convert protobuf to string
            AnyRequest rsp;
            if (response->data.type == SplitInfo::PROGRESS_UPDATE) {
                rsp.set_type(AnyRequest_Type_PROGRESS_UPDATE);
                ProgressUpdate *r = new ProgressUpdate;
                r->set_bytescompleted(response->data.bytesCompleted);
                r->set_bytestotal(response->data.bytesTotal);
                r->set_tag(response->data.filename);
                rsp.set_allocated_progressupdate(r);
                LOG(INFO) << "TRANSPORT: sending progressupdate for "
                          << rsp.fetchsplitresponse().tag();
            } else if (response->data.type == SplitInfo::FETCH_SPLIT_RESPONSE) {
                rsp.set_type(AnyRequest_Type_FETCH_SPLIT_RESPONSE);
                FetchSplitResponse *r = new FetchSplitResponse;
                r->set_status(response->data.responseCode);
                r->set_tag(response->data.filename);
                rsp.set_allocated_fetchsplitresponse(r);
                LOG(INFO) << "TRANSPORT: sending fetchsplitresponse for " << r->tag();
            }
            std::string responseStr = rsp.SerializeAsString();
            s_sendmore(socket, "");     // envelope delimiter
            s_send(socket, responseStr);
            queues_.responseQueue.slotRead(response);
        }
        boost::this_thread::interruption_point();
    }
}