Example #1
TEST_F(FileRecvTests, SendDataGetNextChunkIdMethods) {
   //  The server will receive data requests from the client but never
   //  respond to them, so the client will time out:
   int port = GetTcpPort();
   std::string location = GetTcpLocation(port);
   zthread_new(FileRecvTests::SendThreadNextChunkIdDie, reinterpret_cast<void*>(&location));

   FileRecv client;
   client.SetTimeout(1000);

   FileRecv::Socket status = client.SetLocation(location);
   EXPECT_EQ(status, FileRecv::Socket::OK);   
   
   std::vector<uint8_t> p;
   FileRecv::Stream res = client.Receive(p);

   EXPECT_EQ(res, FileRecv::Stream::TIMEOUT);
   EXPECT_EQ(p.size(), 0);
}
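The test above spawns a SendThreadNextChunkIdDie helper that the listing omits. A minimal sketch of what such a thread might look like, assuming a hypothetical FileSend counterpart class with the same SetLocation API and the <thread>/<chrono> headers (the class and its API are assumptions, not the project's actual code):

void* FileRecvTests::SendThreadNextChunkIdDie(void* args) {
   // Hypothetical sketch only: bind the location so the client can
   // connect, then idle without ever answering a data request, which
   // forces client.Receive() into the 1000 ms timeout asserted above.
   std::string location = *reinterpret_cast<std::string*>(args);
   FileSend server;   // assumed server-side counterpart of FileRecv
   EXPECT_EQ(server.SetLocation(location), FileSend::Socket::OK);
   std::this_thread::sleep_for(std::chrono::seconds(2));   // outlive the client timeout
   return nullptr;
}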
Example #2
int main(int argc, char* argv[]){  
	Fix_Initialize(); 

	g_apex_cfg = apex_cfg_new(argc,argv); 
	assert(g_apex_cfg);

	if(g_apex_cfg->log_path){
		zlog_use_file(g_apex_cfg->log_path); 
	} else {
		zlog_use_stdout();
	}  
	g_zmq_context = zctx_new(1);
	g_mutex_hash = zmutex_new(); 
	g_msgid2reply = hash_new(&g_hctrl_msgid2reply, NULL);

	char* broker = zstrdup(g_apex_cfg->broker);

	int thread_count = g_apex_cfg->worker_threads;
	zthread_t* reply_threads = (zthread_t*)zmalloc(thread_count*sizeof(zthread_t));
	zthread_t* recv_threads = (zthread_t*)zmalloc(thread_count*sizeof(zthread_t));

	for(int i=0; i<g_apex_cfg->worker_threads; i++){
		//reply_threads[i] = zthread_new(thread_reply, zstrdup(broker));
		recv_threads[i] = zthread_new(thread_recv, zstrdup(broker));
	}  
	zfree(broker); 

	for(int i=0; i<thread_count; i++){
		//zthread_join(reply_threads[i]);
		zthread_join(recv_threads[i]);
	}
	zfree(reply_threads);
	zfree(recv_threads);

	zmutex_destroy(&g_mutex_hash);  
	hash_destroy(&g_msgid2reply);
	apex_cfg_destroy(&g_apex_cfg); 
	zctx_destroy(&g_zmq_context);

	Fix_Uninitialize();
	return 0;
}
Example #3
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    //  Prepare our context and sockets
    zctx_t *ctx = zctx_new ();
    char endpoint [256];

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Bind state backend / publisher to endpoint
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }

    //  Connect state frontend to all peers
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (statefe, "");   //  Subscribe to all state updates
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);

    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Prepare monitor socket
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (ctx, worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (ctx, client_task, NULL);

    //  Interesting part
    //  -------------------------------------------------------------
    //  Publish-subscribe flow
    //  - Poll statefe and process capacity updates
    //  - Each time capacity changes, broadcast new value
    //  Request-reply flow
    //  - Poll primary and process local/cloud replies
    //  - While worker available, route localfe to local or cloud

    //  Queue of available workers
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    while (1) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers ready, wait indefinitely
        int rc = zmq_poll (primary, 4,
                           local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;

        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *address = zmsg_unwrap (msg);
            zlist_append (workers, address);
            local_capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), LRU_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker address for anything
            zframe_t *address = zmsg_unwrap (msg);
            zframe_destroy (&address);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
                    &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  Handle capacity updates; each state message carries the
        //  sender's address frame followed by its capacity
        if (primary [2].revents & ZMQ_POLLIN) {
            char *peer = zstr_recv (statefe);
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (peer);
            free (status);
        }
        //  Handle monitor message
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }

        //  Now route as many client requests as we can handle
        //  - If we have local capacity we poll both localfe and cloudfe
        //  - If we have cloud capacity only, we poll just localfe
        //  - Route any request locally if we can, else to cloud
        //
        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;      //  No work, go back to primary

            if (local_capacity) {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        if (local_capacity != previous) {
            //  We stick our own address onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity
            zstr_sendf (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
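Examples #3, #4, #10, and #13 rely on a few definitions the snippets omit (Example #10 names the counts NUM_CLIENTS/NUM_WORKERS; the READY signal is called LRU_READY in older revisions and WORKER_READY in newer ones). In the ZeroMQ Guide's peering sources they look like this:

#define NBR_CLIENTS 10
#define NBR_WORKERS 3
#define WORKER_READY "\001"     //  Signals worker is ready

static char *self;              //  This broker's name, from argv [1]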
Example #4
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering2 me {you}...\n");
        return 0;
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    zctx_t *ctx = zctx_new ();

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);

    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);
    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Get user to tell us when we can start...
    printf ("Press Enter when all brokers are started: ");
    getchar ();

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  .split request-reply handling
    //  Here, we handle the request-reply flow. We're using load-balancing
    //  to poll workers at all times, and clients only when one or more
    //  workers are available.

    //  Least recently used queue of available workers
    int capacity = 0;
    zlist_t *workers = zlist_new ();

    while (true) {
        //  First, route any waiting replies from workers
        zmq_pollitem_t backends [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers, wait indefinitely
        int rc = zmq_poll (backends, 2,
            capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle reply from local worker
        zmsg_t *msg = NULL;
        if (backends [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);
            capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else
        if (backends [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker identity for anything
            zframe_t *identity = zmsg_unwrap (msg);
            zframe_destroy (&identity);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split route client requests
        //  Now we route as many client requests as we have worker capacity
        //  for. We may reroute requests from our local frontend, but not from 
        //  the cloud frontend. We reroute randomly now, just to test things
        //  out. In the next version, we'll do this properly by calculating
        //  cloud capacity:

        while (capacity) {
            zmq_pollitem_t frontends [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            rc = zmq_poll (frontends, 2, 0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends [1].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (cloudfe);
                reroutable = 0;
            }
            else
            if (frontends [0].revents & ZMQ_POLLIN) {
                msg = zmsg_recv (localfe);
                reroutable = 1;
            }
            else
                break;      //  No work, go back to backends

            //  If reroutable, send to cloud 20% of the time
            //  Here we'd normally use cloud status information
            //
            if (reroutable && argc > 2 && randof (5) == 0) {
                //  Route to random broker peer
                int peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [peer], strlen (argv [peer]));
                zmsg_send (&msg, cloudbe);
            }
            else {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}
Example #5
int main(void)
{
	zctx_t *ctx = zctx_new();
	void *frontend = zsocket_new(ctx, ZMQ_ROUTER);
	void *backend = zsocket_new(ctx, ZMQ_ROUTER);

	// IPC doesn't yet work on MS Windows.
#if (defined (WIN32))
	zsocket_bind(frontend, "tcp://*:5672");
	zsocket_bind(backend, "tcp://*:5673");
#else
	zsocket_bind(frontend, "ipc://frontend.ipc");
	zsocket_bind(backend, "ipc://backend.ipc");
#endif

	int client_nbr;
	for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
		zthread_new(client_task, NULL);
	int worker_nbr;
	for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
		zthread_new(worker_task, NULL);

	//  Queue of available workers
	zlist_t *workers = zlist_new();

	//  .split main load-balancer loop
	//  Here is the main loop for the load balancer. It works the same way
	//  as the previous example, but is a lot shorter because CZMQ gives
	//  us an API that does more with fewer calls:
	while (1) {
		zmq_pollitem_t items[] = {
				{ backend, 0, ZMQ_POLLIN, 0 },
				{ frontend, 0, ZMQ_POLLIN, 0 }
		};
		//  Poll frontend only if we have available workers
		int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
		if (rc == -1)
			break;              //  Interrupted

		//  Handle worker activity on backend
		if (items[0].revents & ZMQ_POLLIN) {
			//  Use worker identity for load-balancing
			zmsg_t *msg = zmsg_recv(backend);
			if (!msg)
				break;          //  Interrupted

#if 0
			// zmsg_unwrap is DEPRECATED as over-engineered, poor style
			zframe_t *identity = zmsg_unwrap(msg);
#else
			zframe_t *identity = zmsg_pop(msg);
			zframe_t *delimiter = zmsg_pop(msg);
			zframe_destroy(&delimiter); 
#endif

			zlist_append(workers, identity);

			//  Forward message to client if it's not a READY
			zframe_t *frame = zmsg_first(msg);
			if (memcmp(zframe_data(frame), WORKER_READY, strlen(WORKER_READY)) == 0) {
				zmsg_destroy(&msg);
			} else {
				zmsg_send(&msg, frontend);
				if (--client_nbr == 0)
					break; //  Exit after NBR_CLIENTS replies have been delivered
			}
		}
		if (items[1].revents & ZMQ_POLLIN) {
			//  Get client request, route to first available worker
			zmsg_t *msg = zmsg_recv(frontend);
			if (msg) {
#if 0
				// zmsg_wrap is DEPRECATED as unsafe
				zmsg_wrap(msg, (zframe_t *)zlist_pop(workers));
#else
				zmsg_pushmem(msg, NULL, 0); // delimiter
				zmsg_push(msg, (zframe_t *)zlist_pop(workers));
#endif

				zmsg_send(&msg, backend);
			}
		}
	}
	//  When we're done, clean up properly
	while (zlist_size(workers)) {
		zframe_t *frame = (zframe_t *)zlist_pop(workers);
		zframe_destroy(&frame);
	}
	zlist_destroy(&workers);
	zctx_destroy(&ctx);
	return 0;
}
Example #6
int 
main(int argc, char* argv[])
{
  int i, client_num, worker_num;
  zctx_t* ctx;
  void* frontend;
  void* backend;
  zlist_t* workers;

  if (argc < 3) {
    fprintf(stderr, "usage: %s client_num worker_num\n", argv[0]);
    return 1;
  }
  client_num = atoi(argv[1]);
  worker_num = atoi(argv[2]);

  ctx = zctx_new();

  frontend = zsocket_new(ctx, ZMQ_ROUTER);
  backend = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(frontend, "ipc://frontend.ipc");
  zsocket_bind(backend, "ipc://backend.ipc");

  for (i = 0; i < client_num; ++i)
    zthread_new(client_routine, NULL);
  for (i = 0; i < worker_num; ++i)
    zthread_new(worker_routine, NULL);

  workers = zlist_new();
  while (1) {
    zmq_pollitem_t items[] = {
      {backend, 0, ZMQ_POLLIN, 0}, 
      {frontend, 0, ZMQ_POLLIN, 0}, 
    };
    int rc = zmq_poll(items, zlist_size(workers) ? 2 : 1, -1);
    if (-1 == rc)
      break;

    if (items[0].revents & ZMQ_POLLIN) {
      zmsg_t* msg;
      zframe_t* identity;
      zframe_t* frame;

      msg = zmsg_recv(backend);
      if (NULL == msg)
        break;
      identity = zmsg_unwrap(msg);
      zlist_append(workers, identity);

      frame = zmsg_first(msg);
      if (0 == memcmp(zframe_data(frame), WORKER_READY, 1))
        zmsg_destroy(&msg);
      else 
        zmsg_send(&msg, frontend);
    }
    if (items[1].revents & ZMQ_POLLIN) {
      zmsg_t* msg = zmsg_recv(frontend);
      if (NULL != msg) {
        zmsg_wrap(msg, (zframe_t*)zlist_pop(workers));
        zmsg_send(&msg, backend);
      }
    }
  }

  while (zlist_size(workers)) {
    zframe_t* frame = (zframe_t*)zlist_pop(workers);
    zframe_destroy(&frame);
  }

  zlist_destroy(&workers);
  zctx_destroy(&ctx);
  return 0;
}
Example #7
void
curve_server_test (bool verbose)
{
    printf (" * curve_server: ");

    //  @selftest
    //  Create temporary directory for test files
    srand (time (NULL));
    zsys_dir_create (TESTDIR);
    
    zcert_t *server_cert = zcert_new ();
    zcert_save (server_cert, TESTDIR "/server.cert");

    //  Install the authenticator
    zctx_t *ctx = zctx_new ();
    zauth_t *auth = zauth_new (ctx);
    assert (auth);
    zauth_set_verbose (auth, verbose);
    zauth_configure_curve (auth, "*", TESTDIR);

    //  We'll run a set of clients as background tasks, and the
    //  server in this foreground thread. Don't pass verbose to
    //  the clients as the results are unreadable.
    int live_clients;
    for (live_clients = 0; live_clients < 5; live_clients++)
        zthread_new (client_task, &verbose);

    curve_server_t *server = curve_server_new (ctx, &server_cert);
    curve_server_set_verbose (server, verbose);
    curve_server_bind (server, "tcp://127.0.0.1:9006");
    
    while (live_clients > 0) {
        zmsg_t *msg = curve_server_recv (server);
        if (!msg)
            break;              //  Interrupted
        if (memcmp (zframe_data (zmsg_last (msg)), "END", 3) == 0)
            live_clients--;
        curve_server_send (server, &msg);
    }

    //  Try an invalid client/server combination
    byte bad_server_key [32] = { 0 };
    zcert_t *unknown = zcert_new ();
    curve_client_t *client = curve_client_new (&unknown);
    curve_client_set_verbose (client, true);
    curve_client_connect (client, "tcp://127.0.0.1:9006", bad_server_key);
    curve_client_sendstr (client, "Hello, World");

    //  Expect no reply after 250msec
    zmq_pollitem_t pollitems [] = {
        { curve_client_handle (client), 0, ZMQ_POLLIN, 0 }
    };
    assert (zmq_poll (pollitems, 1, 250) == 0);
    curve_client_destroy (&client);

    //  Delete all test files
    zdir_t *dir = zdir_new (TESTDIR, NULL);
    zdir_remove (dir, true);
    zdir_destroy (&dir);

    curve_server_destroy (&server);
    zauth_destroy (&auth);
    zctx_destroy (&ctx);
    //  @end
   
    //  Ensure client threads have exited before we do
    zclock_sleep (100);
    printf ("OK\n");
}
Example #8
void
curve_client_test (bool verbose)
{
    printf (" * curve_client: ");
    //  @selftest
    //  Create temporary directory for test files
    zsys_dir_create (TESTDIR);
    
    //  We'll create two new certificates and save the client public 
    //  certificate on disk; in a real case we'd transfer this securely
    //  from the client machine to the server machine.
    zcert_t *server_cert = zcert_new ();
    zcert_save (server_cert, TESTDIR "/server.cert");

    //  We'll run the server as a background task, and the
    //  client in this foreground thread.
    zthread_new (server_task, &verbose);

    zcert_t *client_cert = zcert_new ();
    zcert_save_public (client_cert, TESTDIR "/client.cert");

    curve_client_t *client = curve_client_new (&client_cert);
    curve_client_set_metadata (client, "Client", "CURVEZMQ/curve_client");
    curve_client_set_metadata (client, "Identity", "E475DA11");
    curve_client_set_verbose (client, verbose);
    curve_client_connect (client, "tcp://127.0.0.1:9005", (byte *)zcert_public_key (server_cert));

    curve_client_sendstr (client, "Hello, World");
    char *reply = curve_client_recvstr (client);
    assert (streq (reply, "Hello, World"));
    free (reply);

    //  Try a multipart message
    zmsg_t *msg = zmsg_new ();
    zmsg_addstr (msg, "Hello, World");
    zmsg_addstr (msg, "Second frame");
    curve_client_send (client, &msg);
    msg = curve_client_recv (client);
    assert (zmsg_size (msg) == 2);
    zmsg_destroy (&msg);

    //  Now send messages of increasing size, check they work
    int count;
    int size = 0;
    for (count = 0; count < 18; count++) {
        if (verbose)
            printf ("Testing message of size=%d...\n", size);

        zframe_t *data = zframe_new (NULL, size);
        int byte_nbr;
        //  Set data to sequence 0...255 repeated
        for (byte_nbr = 0; byte_nbr < size; byte_nbr++)
            zframe_data (data)[byte_nbr] = (byte) byte_nbr;
        msg = zmsg_new ();
        zmsg_prepend (msg, &data);
        curve_client_send (client, &msg);

        msg = curve_client_recv (client);
        data = zmsg_pop (msg);
        assert (data);
        assert (zframe_size (data) == size);
        for (byte_nbr = 0; byte_nbr < size; byte_nbr++) {
            assert (zframe_data (data)[byte_nbr] == (byte) byte_nbr);
        }
        zframe_destroy (&data);
        zmsg_destroy (&msg);
        size = size * 2 + 1;
    }
    //  Signal end of test
    curve_client_sendstr (client, "END");
    reply = curve_client_recvstr (client);
    free (reply);

    zcert_destroy (&server_cert);
    zcert_destroy (&client_cert);
    curve_client_destroy (&client);
    
    //  Delete all test files
    zdir_t *dir = zdir_new (TESTDIR, NULL);
    zdir_remove (dir, true);
    zdir_destroy (&dir);
    //  @end

    //  Ensure server thread has exited before we do
    zclock_sleep (100);
    printf ("OK\n");
}
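Example #8's client test assumes a server_task helper running in the background. A plausible echo-server sketch using the curve_server API from Example #7 follows; the real helper in the CurveZMQ sources may differ in detail:

static void *
server_task (void *args)
{
    //  Hypothetical sketch: echo every message back to the client and
    //  stop once the final "END" frame has been echoed.
    bool verbose = *((bool *) args);
    zctx_t *ctx = zctx_new ();
    zauth_t *auth = zauth_new (ctx);
    zauth_set_verbose (auth, verbose);
    zauth_configure_curve (auth, "*", TESTDIR);

    zcert_t *server_cert = zcert_load (TESTDIR "/server.cert");
    curve_server_t *server = curve_server_new (ctx, &server_cert);
    curve_server_set_verbose (server, verbose);
    curve_server_bind (server, "tcp://127.0.0.1:9005");

    bool finished = false;
    while (!finished) {
        zmsg_t *msg = curve_server_recv (server);
        if (!msg)
            break;              //  Interrupted
        finished = memcmp (zframe_data (zmsg_last (msg)), "END", 3) == 0;
        curve_server_send (server, &msg);
    }
    curve_server_destroy (&server);
    zauth_destroy (&auth);
    zctx_destroy (&ctx);
    return NULL;
}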
Example #9
int main (void)
{
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    void *backend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "ipc://frontend.ipc");
    zsocket_bind (backend, "ipc://backend.ipc");

    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Queue of available workers
    zlist_t *workers = zlist_new ();

    //  Here is the main loop for the load-balancer. It works the same way
    //  as the previous example, but is a lot shorter because CZMQ gives
    //  us an API that does more with fewer calls:
    while (true) {
        zmq_pollitem_t items [] = {
            { backend,  0, ZMQ_POLLIN, 0 },
            { frontend, 0, ZMQ_POLLIN, 0 }
        };
        //  Poll frontend only if we have available workers
        int rc = zmq_poll (items, zlist_size (workers)? 2: 1, -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Handle worker activity on backend
        if (items [0].revents & ZMQ_POLLIN) {
            //  Use worker identity for load-balancing
            zmsg_t *msg = zmsg_recv (backend);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);

            //  Forward message to client if it's not a READY
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
            else
                zmsg_send (&msg, frontend);
        }
        if (items [1].revents & ZMQ_POLLIN) {
            //  Get client request, route to first available worker
            zmsg_t *msg = zmsg_recv (frontend);
            if (msg) {
                zmsg_wrap (msg, (zframe_t *) zlist_pop (workers));
                zmsg_send (&msg, backend);
            }
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return 0;
}
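Example #9 (like Examples #5 and #6) omits the client and worker threads it spawns. Minimal versions, close to the helpers in the ZeroMQ Guide's lbbroker2.c, look like this:

static void *
client_task (void *args)
{
    zctx_t *ctx = zctx_new ();
    void *client = zsocket_new (ctx, ZMQ_REQ);
    zsocket_connect (client, "ipc://frontend.ipc");

    //  Send request, get reply
    while (true) {
        zstr_send (client, "HELLO");
        char *reply = zstr_recv (client);
        if (!reply)
            break;              //  Interrupted
        printf ("Client: %s\n", reply);
        free (reply);
        sleep (1);
    }
    zctx_destroy (&ctx);
    return NULL;
}

static void *
worker_task (void *args)
{
    zctx_t *ctx = zctx_new ();
    void *worker = zsocket_new (ctx, ZMQ_REQ);
    zsocket_connect (worker, "ipc://backend.ipc");

    //  Tell broker we're ready for work
    zframe_t *frame = zframe_new (WORKER_READY, 1);
    zframe_send (&frame, worker, 0);

    //  Process messages as they arrive
    while (true) {
        zmsg_t *msg = zmsg_recv (worker);
        if (!msg)
            break;              //  Interrupted
        zframe_reset (zmsg_last (msg), "OK", 2);
        zmsg_send (&msg, worker);
    }
    zctx_destroy (&ctx);
    return NULL;
}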
Example #10
int 
main(int argc, char* argv[])
{
  zctx_t* ctx;
  void* cloudfe;
  void* cloudbe;
  int i;
  void* localfe;
  void* localbe;
  int capacity = 0;
  zlist_t* workers;

  if (argc < 2) {
    fprintf(stderr, "syntax: peering me {you} ...\n");
    return 1;
  }
  self = argv[1];
  fprintf(stdout, "I: preparing broker at %s ...\n", self);
  srand((unsigned int)time(NULL));

  ctx = zctx_new();

  cloudfe = zsocket_new(ctx, ZMQ_ROUTER);
  zsockopt_set_identity(cloudfe, self);
  zsocket_bind(cloudfe, "ipc://%s-cloud.ipc", self);

  cloudbe = zsocket_new(ctx, ZMQ_ROUTER);
  zsockopt_set_identity(cloudbe, self);
  for (i = 2; i < argc; ++i) {
    char* peer = argv[i];
    fprintf(stdout, "I: connecting to cloud frontend at '%s'\n", peer);
    zsocket_connect(cloudbe, "ipc://%s-cloud.ipc", peer);
  }

  localfe = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(localfe, "ipc://%s-localfe.ipc", self);
  localbe = zsocket_new(ctx, ZMQ_ROUTER);
  zsocket_bind(localbe, "ipc://%s-localbe.ipc", self);

  fprintf(stdout, "Press Enter when all brokers are started: ");
  getchar();

  for (i = 0; i < NUM_WORKERS; ++i)
    zthread_new(worker_routine, NULL);
  for (i = 0; i < NUM_CLIENTS; ++i)
    zthread_new(client_routine, NULL);

  workers = zlist_new();
  while (1) {
    zmsg_t* msg;
    zmq_pollitem_t backends[] = {
      {localbe, 0, ZMQ_POLLIN, 0}, 
      {cloudbe, 0, ZMQ_POLLIN, 0}, 
    };
    int r = zmq_poll(backends, 2, capacity ? 1000 * ZMQ_POLL_MSEC : -1);
    if (-1 == r)
      break;

    msg = NULL;
    if (backends[0].revents & ZMQ_POLLIN) {
      zframe_t* identity;
      zframe_t* frame;
      msg = zmsg_recv(localbe);
      if (!msg)
        break;

      identity = zmsg_unwrap(msg);
      zlist_append(workers, identity);
      ++capacity;

      frame = zmsg_first(msg);
      if (0 == memcmp(zframe_data(frame), WORKER_READY, 1))
        zmsg_destroy(&msg);
    }
    else if (backends[1].revents & ZMQ_POLLIN) {
      zframe_t* identity;
      msg = zmsg_recv(cloudbe);
      if (!msg)
        break;

      identity = zmsg_unwrap(msg);
      zframe_destroy(&identity);
    }
    for (i = 2; msg && i < argc; ++i) {
      char* data = (char*)zframe_data(zmsg_first(msg));
      size_t size = zframe_size(zmsg_first(msg));
      if (size == strlen(argv[i]) && 0 == memcmp(data, argv[i], size))
        zmsg_send(&msg, cloudfe);
    }
    if (msg)
      zmsg_send(&msg, localfe);

    while (capacity) {
      int reroutable = 0;
      zmq_pollitem_t frontends[] = {
        {localfe, 0, ZMQ_POLLIN, 0}, 
        {cloudfe, 0, ZMQ_POLLIN, 0},
      };
      r = zmq_poll(frontends, 2, 0);
      assert(r >= 0);

      if (frontends[1].revents & ZMQ_POLLIN) {
        msg = zmsg_recv(cloudfe);
        reroutable = 0;
      }
      else if (frontends[0].revents & ZMQ_POLLIN) {
        msg = zmsg_recv(localfe);
        reroutable = 1;
      }
      else
        break;

      if (0 != reroutable && argc > 2 && 0 == rand() % 5) {
        int random_peer = rand() % (argc - 2) + 2;
        zmsg_pushmem(msg, argv[random_peer], strlen(argv[random_peer]));
        zmsg_send(&msg, cloudbe);
      }
      else {
        zframe_t* frame = (zframe_t*)zlist_pop(workers);
        zmsg_wrap(msg, frame);
        zmsg_send(&msg, localbe);
        --capacity;
      }
    }
  }

  while (zlist_size(workers)) {
    zframe_t* frame = (zframe_t*)zlist_pop(workers);
    zframe_destroy(&frame);
  }
  zlist_destroy(&workers);

  zctx_destroy(&ctx);
  return 0;
}
Example #11
int main (int argc, char **argv)
{
    if (argc <= 1) {
        info("Please launch global_server instead of zone_server. (argc=%d)", argc);
        exit (0);
    }

    ZoneServer *zoneServer = NULL;
    BarrackServer *barrackServer = NULL;
    SocialServer *socialServer = NULL;

    // === Crash handler ===
#ifdef WIN32
    SetUnhandledExceptionFilter (crashHandler);
#else
    struct sigaction sa = {};
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sa.sa_sigaction = crashHandler;
    sigaction(SIGSEGV, &sa, NULL);
    sigaction(SIGABRT, &sa, NULL);
#endif

    // === Read the command line arguments ===
    RouterId_t routerId = atoi(*++argv);
    char *routerIp = *++argv;
    int port = atoi(*++argv);
    uint16_t workersCount = atoi(*++argv);
    char *globalServerIp = *++argv;
    int globalServerPort = atoi(*++argv);
    char *sqlHostname = *++argv;
    char *sqlUsername = *++argv;
    char *sqlPassword = *++argv;
    char *sqlDatabase = *++argv;
    char *redisHostname = *++argv;
    int redisPort = atoi(*++argv);
    ServerType serverType = atoi(*++argv);
    char *output = *++argv;

    // Set a custom output
    dbgSetCustomOutput(output);

    // For Windows, change the console title
#ifdef WIN32
    switch (serverType) {
    case SERVER_TYPE_BARRACK:
        SetConsoleTitle(zsys_sprintf("Barrack (%d)", routerId));
        break;

    case SERVER_TYPE_ZONE:
        SetConsoleTitle(zsys_sprintf("Zone (%d)", routerId));
        break;

    case SERVER_TYPE_SOCIAL:
        SetConsoleTitle(zsys_sprintf("Social (%d)", routerId));
        break;

    default :
        SetConsoleTitle(zsys_sprintf("UNKNOWN (%d)", routerId));
        break;
    }
#endif

    // === Build the Event Server ===
    EventServer *eventServer;
    EventServerInfo eventServerInfo;
    if (!(eventServerInfoInit(&eventServerInfo, routerId, workersCount, redisHostname, redisPort))) {
        error("Cannot initialize the event server.");
        return -1;
    }
    if (!(eventServer = eventServerNew(&eventServerInfo, serverType))) {
        error("Cannot create the event server.");
        return -1;
    }
    if ((zthread_new((zthread_detached_fn *) eventServerStart, eventServer)) != 0) {
        error("Cannot start the event server.");
        return -1;
    }

    // Specific server stuff
    DisconnectEventHandler disconnectHandler;
    switch (serverType) {
    case SERVER_TYPE_BARRACK:
        disconnectHandler = barrackEventServerOnDisconnect;
        break;

    case SERVER_TYPE_ZONE:
        disconnectHandler = zoneEventServerOnDisconnect;
        break;

    case SERVER_TYPE_SOCIAL:
        disconnectHandler = socialEventServerOnDisconnect;
        break;

    default :
        error("Unknown server type.");
        return -1;
    }

    // === Build the Server ===
    Server *server;
    if (!(server = serverFactoryCreateServer(
                       serverType,
                       routerId,
                       routerIp, port,
                       workersCount,
                       output,
                       globalServerIp, globalServerPort,
                       sqlHostname, sqlUsername, sqlPassword, sqlDatabase,
                       redisHostname, redisPort, disconnectHandler
                   ))) {
        error("Cannot create a Server.");
        return -1;
    }

    // Initialize the Server
    switch (serverType)
    {
    case SERVER_TYPE_BARRACK:
        // Initialize the Barrack Server
        if ((barrackServer = barrackServerNew(server))) {

            // Start the Barrack Server
            if (!barrackServerStart(barrackServer)) {
                error("Cannot start the BarrackServer properly.");
            }

            // Unload the Barrack Server properly
            barrackServerDestroy(&barrackServer);
        }
        else {
            error("Cannot initialize the BarrackServer properly.");
        }
        break;

    case SERVER_TYPE_ZONE:
        // Initialize the Zone Server
        if ((zoneServer = zoneServerNew(server))) {

            // Start the Zone Server
            if (!zoneServerStart(zoneServer)) {
                error("Cannot start the Zone Server properly.");
            }

            // Unload the Zone Server properly
            zoneServerDestroy(&zoneServer);
        }
        else {
            error("Cannot initialize the Zone Server properly.");
        }
        break;

    case SERVER_TYPE_SOCIAL:
        // Initialize the Social Server
        if ((socialServer = socialServerNew(server))) {

            // Start the Social Server
            if (!socialServerStart(socialServer)) {
                error("Cannot start the Social Server properly.");
            }

            // Unload the Social Server properly
            socialServerDestroy(&socialServer);
        }
        else {
            error("Cannot initialize the Social Server properly.");
        }
        break;

    default :
        error("Cannot start an unknown serverType.");
        break;
    }

    // Shutdown the CZMQ layer properly
    zsys_shutdown();

    // Close the custom debug file if necessary
    dbgClose();

    pause();

    return 0;
}
Example #12
int main (void) {
    zthread_new(server_task, NULL);
    zclock_sleep(60 * 60 * 1000);   //  Keep main alive for one hour
    return 0;
}
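Example #12 shows only the launcher. A server_task in this style commonly follows the ZeroMQ Guide's asynchronous-server pattern; a plausible sketch (server_worker is a further helper, indicated here only by its role):

static void *
server_task (void *args)
{
    //  ROUTER frontend faces the clients; a DEALER backend is bridged
    //  to a pool of in-process workers by zmq_proxy.
    zctx_t *ctx = zctx_new ();
    void *frontend = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (frontend, "tcp://*:5570");
    void *backend = zsocket_new (ctx, ZMQ_DEALER);
    zsocket_bind (backend, "inproc://backend");

    //  Worker threads would be forked here, e.g.:
    //  zthread_fork (ctx, server_worker, NULL);

    zmq_proxy (frontend, backend, NULL);
    zctx_destroy (&ctx);
    return NULL;
}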
Example #13
int main (int argc, char *argv [])
{
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argc < 2) {
        printf ("syntax: peering3 me {you}...\n");
        exit (EXIT_FAILURE);
    }
    self = argv [1];
    printf ("I: preparing broker at %s...\n", self);
    srandom ((unsigned) time (NULL));

    zctx_t *ctx = zctx_new ();

    //  Prepare local frontend and backend
    void *localfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localfe, "ipc://%s-localfe.ipc", self);

    void *localbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsocket_bind (localbe, "ipc://%s-localbe.ipc", self);

    //  Bind cloud frontend to endpoint
    void *cloudfe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudfe, self);
    zsocket_bind (cloudfe, "ipc://%s-cloud.ipc", self);
    
    //  Connect cloud backend to all peers
    void *cloudbe = zsocket_new (ctx, ZMQ_ROUTER);
    zsockopt_set_identity (cloudbe, self);
    int argn;
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to cloud frontend at '%s'\n", peer);
        zsocket_connect (cloudbe, "ipc://%s-cloud.ipc", peer);
    }
    //  Bind state backend to endpoint
    void *statebe = zsocket_new (ctx, ZMQ_PUB);
    zsocket_bind (statebe, "ipc://%s-state.ipc", self);

    //  Connect state frontend to all peers
    void *statefe = zsocket_new (ctx, ZMQ_SUB);
    zsockopt_set_subscribe (statefe, "");
    for (argn = 2; argn < argc; argn++) {
        char *peer = argv [argn];
        printf ("I: connecting to state backend at '%s'\n", peer);
        zsocket_connect (statefe, "ipc://%s-state.ipc", peer);
    }
    //  Prepare monitor socket
    void *monitor = zsocket_new (ctx, ZMQ_PULL);
    zsocket_bind (monitor, "ipc://%s-monitor.ipc", self);

    //  .split start child tasks
    //  After binding and connecting all our sockets, we start our child
    //  tasks - workers and clients:

    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        zthread_new (worker_task, NULL);

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        zthread_new (client_task, NULL);

    //  Queue of available workers
    int local_capacity = 0;
    int cloud_capacity = 0;
    zlist_t *workers = zlist_new ();

    //  .split main loop
    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:

    while (true) {
        zmq_pollitem_t primary [] = {
            { localbe, 0, ZMQ_POLLIN, 0 },
            { cloudbe, 0, ZMQ_POLLIN, 0 },
            { statefe, 0, ZMQ_POLLIN, 0 },
            { monitor, 0, ZMQ_POLLIN, 0 }
        };
        //  If we have no workers ready, wait indefinitely
        int rc = zmq_poll (primary, 4,
            local_capacity? 1000 * ZMQ_POLL_MSEC: -1);
        if (rc == -1)
            break;              //  Interrupted

        //  Track if capacity changes during this iteration
        int previous = local_capacity;

        //  Handle reply from local worker
        zmsg_t *msg = NULL;

        if (primary [0].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (localbe);
            if (!msg)
                break;          //  Interrupted
            zframe_t *identity = zmsg_unwrap (msg);
            zlist_append (workers, identity);
            local_capacity++;

            //  If it's READY, don't route the message any further
            zframe_t *frame = zmsg_first (msg);
            if (memcmp (zframe_data (frame), WORKER_READY, 1) == 0)
                zmsg_destroy (&msg);
        }
        //  Or handle reply from peer broker
        else
        if (primary [1].revents & ZMQ_POLLIN) {
            msg = zmsg_recv (cloudbe);
            if (!msg)
                break;          //  Interrupted
            //  We don't use peer broker identity for anything
            zframe_t *identity = zmsg_unwrap (msg);
            zframe_destroy (&identity);
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 2; msg && argn < argc; argn++) {
            char *data = (char *) zframe_data (zmsg_first (msg));
            size_t size = zframe_size (zmsg_first (msg));
            if (size == strlen (argv [argn])
            &&  memcmp (data, argv [argn], size) == 0)
                zmsg_send (&msg, cloudfe);
        }
        //  Route reply to client if we still need to
        if (msg)
            zmsg_send (&msg, localfe);

        //  .split handle state messages
        //  If we have input messages on our statefe or monitor sockets we
        //  can process these immediately:

        if (primary [2].revents & ZMQ_POLLIN) {
            char *peer = zstr_recv (statefe);
            char *status = zstr_recv (statefe);
            cloud_capacity = atoi (status);
            free (peer);
            free (status);
        }
        if (primary [3].revents & ZMQ_POLLIN) {
            char *status = zstr_recv (monitor);
            printf ("%s\n", status);
            free (status);
        }
        //  .split route client requests
    //  Now route as many client requests as we can handle. If we have
        //  local capacity we poll both localfe and cloudfe. If we have cloud
        //  capacity only, we poll just localfe. We route any request locally
        //  if we can, else we route to the cloud.

        while (local_capacity + cloud_capacity) {
            zmq_pollitem_t secondary [] = {
                { localfe, 0, ZMQ_POLLIN, 0 },
                { cloudfe, 0, ZMQ_POLLIN, 0 }
            };
            if (local_capacity)
                rc = zmq_poll (secondary, 2, 0);
            else
                rc = zmq_poll (secondary, 1, 0);
            assert (rc >= 0);

            if (secondary [0].revents & ZMQ_POLLIN)
                msg = zmsg_recv (localfe);
            else
            if (secondary [1].revents & ZMQ_POLLIN)
                msg = zmsg_recv (cloudfe);
            else
                break;      //  No work, go back to primary

            if (local_capacity) {
                zframe_t *frame = (zframe_t *) zlist_pop (workers);
                zmsg_wrap (msg, frame);
                zmsg_send (&msg, localbe);
                local_capacity--;
            }
            else {
                //  Route to random broker peer
                int random_peer = randof (argc - 2) + 2;
                zmsg_pushmem (msg, argv [random_peer], strlen (argv [random_peer]));
                zmsg_send (&msg, cloudbe);
            }
        }
        //  .split broadcast capacity
        //  We broadcast capacity messages to other peers; to reduce chatter
        //  we do this only if our capacity changed.

        if (local_capacity != previous) {
            //  We stick our own identity onto the envelope
            zstr_sendm (statebe, self);
            //  Broadcast new capacity
            zstr_send (statebe, "%d", local_capacity);
        }
    }
    //  When we're done, clean up properly
    while (zlist_size (workers)) {
        zframe_t *frame = (zframe_t *) zlist_pop (workers);
        zframe_destroy (&frame);
    }
    zlist_destroy (&workers);
    zctx_destroy (&ctx);
    return EXIT_SUCCESS;
}