Example #1
int main(int argc, char const * const *argv) {
  char const *hostname;
  int port;
  int rate_limit;
  int message_count;

  int sockfd;
  amqp_connection_state_t conn;

  if (argc < 5) {
    fprintf(stderr, "Usage: amqp_producer host port rate_limit message_count\n");
    return 1;
  }

  hostname = argv[1];
  port = atoi(argv[2]);
  rate_limit = atoi(argv[3]);
  message_count = atoi(argv[4]);

  conn = amqp_new_connection();

  die_on_error(sockfd = amqp_open_socket(hostname, port), "Opening socket");
  amqp_set_sockfd(conn, sockfd);
  die_on_amqp_error(amqp_login(conn, "/", 0, 131072, 0, AMQP_SASL_METHOD_PLAIN, "guest", "guest"),
		    "Logging in");
  amqp_channel_open(conn, 1);
  die_on_amqp_error(amqp_get_rpc_reply(conn), "Opening channel");

  send_batch(conn, "test queue", rate_limit, message_count);

  die_on_amqp_error(amqp_channel_close(conn, 1, AMQP_REPLY_SUCCESS), "Closing channel");
  die_on_amqp_error(amqp_connection_close(conn, AMQP_REPLY_SUCCESS), "Closing connection");
  die_on_error(amqp_destroy_connection(conn), "Ending connection");
  return 0;
}
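Note: both AMQP producers in this listing (this example and Example #5 below) call a send_batch() helper that is not shown. A minimal sketch of its likely shape follows, modeled on rabbitmq-c's producer example; amqp_basic_publish() and die_on_error() are real rabbitmq-c calls, while the "amq.direct" exchange, the payload, and the throttling strategy are assumptions.

static void send_batch(amqp_connection_state_t conn, char const *queue_name,
                       int rate_limit, int message_count) {
  int i;
  /* Placeholder payload; the upstream example fills a fixed-size buffer. */
  amqp_bytes_t message_bytes = amqp_cstring_bytes("hello");

  for (i = 0; i < message_count; i++) {
    die_on_error(amqp_basic_publish(conn, 1,
                                    amqp_cstring_bytes("amq.direct"),
                                    amqp_cstring_bytes(queue_name),
                                    0, /* mandatory */
                                    0, /* immediate */
                                    NULL, message_bytes),
                 "Publishing");

    /* Crude throttle (assumed): pause after every rate_limit messages.
     * The upstream example paces against wall-clock time instead. */
    if (rate_limit > 0 && (i + 1) % rate_limit == 0) {
      sleep(1); /* from <unistd.h> */
    }
  }
}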
Example #2
static gpointer
search_thread_func (gpointer user_data)
{
	SearchThreadData *data;
	GFile *dir;
	GFileInfo *info;
	const char *id;

	data = user_data;

	/* Insert id for toplevel directory into visited */
	dir = g_queue_peek_head (data->directories);
	info = g_file_query_info (dir, G_FILE_ATTRIBUTE_ID_FILE, 0, data->cancellable, NULL);
	if (info) {
		id = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_ID_FILE);
		if (id) {
			g_hash_table_insert (data->visited, g_strdup (id), NULL);
		}
		g_object_unref (info);
	}
	
	while (!g_cancellable_is_cancelled (data->cancellable) &&
	       (dir = g_queue_pop_head (data->directories)) != NULL) {
		visit_directory (dir, data);
		g_object_unref (dir);
	}
	send_batch (data);

	g_idle_add (search_thread_done_idle, data);
	
	return NULL;
}
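Note: send_batch(data) is not shown in these GNOME search threads (see also visit_directory and search_visit_func below). A plausible sketch, under the assumption that hits are handed to the main loop through an idle callback; the SearchHits type and search_thread_add_hits_idle() are assumed names, while g_new() and g_idle_add() are real GLib API.

typedef struct {
	GList *uris;
	SearchThreadData *thread_data;
} SearchHits;

/* Assumed main-loop callback that consumes one batch of hits. */
static gboolean search_thread_add_hits_idle (gpointer user_data);

static void
send_batch (SearchThreadData *data)
{
	SearchHits *hits;

	data->n_processed_files = 0;

	if (data->uri_hits == NULL) {
		return;
	}

	hits = g_new (SearchHits, 1);
	hits->uris = data->uri_hits;
	hits->thread_data = data;
	data->uri_hits = NULL;

	/* Deliver the batch on the main thread; the worker keeps scanning. */
	g_idle_add (search_thread_add_hits_idle, hits);
}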
Example #3
void
mpi_process_group::maybe_send_batch(process_id_type dest) const
{
#ifndef NO_SPLIT_BATCHES  
  impl::outgoing_messages& outgoing = impl_->outgoing[dest];
  if (outgoing.buffer.size() >= impl_->batch_buffer_size ||
      outgoing.headers.size() >= impl_->batch_header_number) {
    // we are full and need to send
    outgoing_messages batch;
    batch.buffer.reserve(impl_->batch_buffer_size);
    batch.swap(outgoing);
    if (batch.buffer.size() >= impl_->batch_buffer_size 
         && batch.headers.size()>1 ) {
      // we are too large, keep the last message in the outgoing buffer
      std::copy(batch.buffer.begin()+batch.headers.back().offset,
                batch.buffer.end(),std::back_inserter(outgoing.buffer));
      batch.buffer.resize(batch.headers.back().offset);
      outgoing.headers.push_back(batch.headers.back());
      batch.headers.pop_back();
      outgoing.headers.front().offset=0;
    }
    send_batch(dest,batch);
  }
#endif
}
Example #4
void Reyes::RendererCL::draw_patches(void* patches_handle,
                                     const mat4& matrix,
                                     const Projection* projection,
                                     const vec4& color)
{
    mat4 proj;
    projection->calc_projection(proj);

    _bound_n_split->init(patches_handle, matrix, projection);

    PatchType patch_type = _patch_index->get_patch_type(patches_handle);
    
    while (!_bound_n_split->done()) {
        
        Batch batch = _bound_n_split->do_bound_n_split(_last_batch);


        if (!reyes_config.dummy_render()) {
            vec4 out_color = color;

            if (reyes_config.pass_color_mode()) {
                float pass_count = statistics.get_pass_count() / 100.0f * 360;
                out_color = vec4(glm::rgbColor(vec3(pass_count,1.0f,1.0f)),1.0f);
            }
            
            _last_batch = send_batch(batch, matrix, proj, out_color, patch_type, batch.transfer_done | _last_batch);
        } else {
            _last_batch = batch.transfer_done;
        }
    }
}
Example #5
int main(int argc, char const *const *argv)
{
  char const *hostname;
  int port, status;
  int rate_limit;
  int message_count;
  amqp_socket_t *socket;
  amqp_connection_state_t conn;

  if (argc < 5) {
    fprintf(stderr, "Usage: amqps_producer host port rate_limit message_count "
            "[cacert.pem [key.pem cert.pem]]\n");
    return 1;
  }

  hostname = argv[1];
  port = atoi(argv[2]);
  rate_limit = atoi(argv[3]);
  message_count = atoi(argv[4]);

  conn = amqp_new_connection();

  socket = amqp_ssl_socket_new(conn);
  if (!socket) {
    die("creating SSL/TLS socket");
  }

  if (argc > 5) {
    status = amqp_ssl_socket_set_cacert(socket, argv[5]);
    if (status) {
      die("setting CA certificate");
    }
  }

  if (argc > 7) {
    status = amqp_ssl_socket_set_key(socket, argv[7], argv[6]);
    if (status) {
      die("setting client cert");
    }
  }

  status = amqp_socket_open(socket, hostname, port);
  if (status) {
    die("opening SSL/TLS connection");
  }

  die_on_amqp_error(amqp_login(conn, "/", 0, 131072, 0, AMQP_SASL_METHOD_PLAIN, "guest", "guest"),
                    "Logging in");
  amqp_channel_open(conn, 1);
  die_on_amqp_error(amqp_get_rpc_reply(conn), "Opening channel");

  send_batch(conn, "test queue", rate_limit, message_count);

  die_on_amqp_error(amqp_channel_close(conn, 1, AMQP_REPLY_SUCCESS), "Closing channel");
  die_on_amqp_error(amqp_connection_close(conn, AMQP_REPLY_SUCCESS), "Closing connection");
  die_on_error(amqp_destroy_connection(conn), "Ending connection");
  return 0;
}
Example #6
void
mpi_process_group::send_batch(process_id_type dest) const
{
  impl::outgoing_messages& outgoing = impl_->outgoing[dest];
  if (outgoing.headers.size()) {
    // need to copy to avoid race conditions
    outgoing_messages batch;
    batch.buffer.reserve(impl_->batch_buffer_size);
    batch.swap(outgoing); 
    send_batch(dest,batch);
  }
}
Example #7
//static uint count = 0;
// A new batch of acceleration data was received.
static void data_handler(AccelData *data, uint32_t num_samples) {	
	unsigned int batch[CFG_BATCH_SIZE];
	
	for(uint sample = 0; sample < num_samples; sample++){
		//batch[sample] = ++count;
		batch[sample] = get_vertical_acceleration(data[sample].x, data[sample].y, data[sample].z);
	}
	
	add_to_store(batch);
	
	if(!sending){
		send_batch();
	}
}
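Note: the send_batch() these AppMessage handlers rely on (also used by the outbox callbacks in Examples #10 and #11) is not shown. A minimal sketch follows; app_message_outbox_begin(), dict_write_data(), and app_message_outbox_send() are real Pebble SDK calls, while peek_store(), MSG_KEY_BATCH, and the `sending` flag are assumptions inferred from the surrounding code.

/* Assumed store accessor: copy the oldest batch into `batch`,
 * returning false when the store is empty. */
static bool peek_store(unsigned int *batch);

static void send_batch(void) {
	unsigned int batch[CFG_BATCH_SIZE];

	if (!peek_store(batch)) {
		sending = false; /* store drained; nothing in flight */
		return;
	}

	DictionaryIterator *iter;
	if (app_message_outbox_begin(&iter) != APP_MSG_OK) {
		sending = false;
		return;
	}

	/* MSG_KEY_BATCH is an assumed app-defined dictionary key. */
	dict_write_data(iter, MSG_KEY_BATCH, (uint8_t *)batch, sizeof(batch));
	app_message_outbox_send();
	sending = true;
}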
Example #8
static gpointer
search_thread_func (gpointer user_data)
{
#ifdef HAVE_FTW_H
  SearchThreadData *data;
  
  data = user_data;
  
  g_private_set (&search_thread_data, data);

  nftw (data->path, search_visit_func, 20,
#ifdef HAVE_GNU_FTW
        FTW_ACTIONRETVAL |
#endif
        FTW_PHYS);

  send_batch (data);
  
  gdk_threads_add_idle (search_thread_done_idle, data);
#endif /* HAVE_FTW_H */
  
  return NULL;
}
Example #9
void mpi_process_group::synchronize() const
{
  // Don't synchronize if we've already finished
  if (boost::mpi::environment::finalized()) 
    return;

#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << std::endl;
#endif

  emit_on_synchronize();

  process_id_type id = process_id(*this);     // Our rank
  process_size_type p = num_processes(*this); // The number of processes

  // Pack the remaining incoming messages into the beginning of the
  // buffers, so that we can receive new messages in this
  // synchronization step without losing those messages that have not
  // yet been received.
  pack_headers();

  impl_->synchronizing_stage[id] = -1;
  int stage=-1;
  bool no_new_messages = false;
  while (true) {
      ++stage;
#ifdef DEBUG
      std::cerr << "SYNC: " << id << " starting stage " << (stage+1) << ".\n";
#endif

      // Tell everyone that we are synchronizing. Note: we use MPI_Isend since 
      // we absolutely cannot have any of these operations blocking.
      
      // increment the stage for the source
      ++impl_->synchronizing_stage[id];
      if (impl_->synchronizing_stage[id] != stage)
        std::cerr << "Expected stage " << stage << ", got "
                  << impl_->synchronizing_stage[id] << std::endl;
      BOOST_ASSERT(impl_->synchronizing_stage[id] == stage);
      // record how many still have messages to be sent
      if (static_cast<int>(impl_->synchronizing_unfinished.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->synchronizing_unfinished.size()) == stage);
        impl_->synchronizing_unfinished.push_back(no_new_messages ? 0 : 1);
      }
      else
        impl_->synchronizing_unfinished[stage]+=(no_new_messages ? 0 : 1);

      // record how many are in that stage
      if (static_cast<int>(impl_->processors_synchronizing_stage.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->processors_synchronizing_stage.size()) == stage);
        impl_->processors_synchronizing_stage.push_back(1);
      }
      else
        ++impl_->processors_synchronizing_stage[stage];

      impl_->synchronizing = true;

      for (int dest = 0; dest < p; ++dest) {
        int sync_message = no_new_messages ? -1 : impl_->number_sent_batches[dest];
        if (dest != id) {
          impl_->number_sent_batches[dest]=0;       
          MPI_Request request;
          MPI_Isend(&sync_message, 1, MPI_INT, dest, msg_synchronizing, impl_->comm,&request);
          int done=0;
          do {
            poll();
            MPI_Test(&request,&done,MPI_STATUS_IGNORE);
          } while (!done);
        }
        else { // need to subtract how many messages I should have received
          impl_->number_received_batches[id] -=impl_->number_sent_batches[id];
          impl_->number_sent_batches[id]=0;
        }
      }

      // Keep handling out-of-band messages until everyone has gotten
      // to this point.
      while (impl_->processors_synchronizing_stage[stage] < p) {
        // with the trigger-based solution we cannot easily pass true here
        poll(/*wait=*/false, -1, true);
      }

      // check that everyone is at least here
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->synchronizing_stage[source] >= stage);

      // receive any batches sent in the meantime
      // all have to be available already
      while (true) {
        bool done=true;
        for (int source=0; source<p ; ++source)
          if(impl_->number_received_batches[source] < 0)
            done = false;
        if (done)
          break;
        poll(false,-1,true);
      }
      
#ifndef NO_IMMEDIATE_PROCESSING
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->number_received_batches[source] >= 0);
#endif

      impl_->synchronizing = false;
      
      // Flush out remaining messages
      if (impl_->synchronizing_unfinished[stage]==0)
        break;
#ifdef NO_IMMEDIATE_PROCESSING
      for (process_id_type dest = 0; dest < p; ++dest)
        process_batch(dest);
#endif

      no_new_messages = true;
      for (process_id_type dest = 0; dest < p; ++dest) {
        if (impl_->outgoing[dest].headers.size() || 
            impl_->number_sent_batches[dest]>0)
          no_new_messages = false;
        send_batch(dest);
      }
    }

  impl_->comm.barrier/*nomacro*/();
#if 0
  // set up for next synchronize call
  for (int source=0; source<p; ++source) {
    if (impl_->synchronizing_stage[source] != stage) {
      std::cerr << id << ": expecting stage " << stage << " from source "
                << source << ", got " << impl_->synchronizing_stage[source]
                << std::endl;
    }
    BOOST_ASSERT(impl_->synchronizing_stage[source]==stage);
  }
#endif
  std::fill(impl_->synchronizing_stage.begin(),
            impl_->synchronizing_stage.end(), -1);
            
  // get rid of the information regarding recorded numbers of processors
  // for the stages we just finished
  impl_->processors_synchronizing_stage.clear();
  impl_->synchronizing_unfinished.clear();

  for (process_id_type dest = 0; dest < p; ++dest)
    BOOST_ASSERT (impl_->outgoing[dest].headers.empty());
#ifndef NO_IMMEDIATE_PROCESSING
  for (int source = 0; source < p; ++source)
    BOOST_ASSERT(impl_->number_received_batches[source] == 0);
#endif

  impl_->free_sent_batches();
#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << " completed." << std::endl;
#endif
}
Example #10
static void outbox_sent_callback(DictionaryIterator *iterator, void *context) {
	update_status(true);
	remove_from_store(CFG_BATCH_SIZE);
	send_batch();
}
Example #11
static void outbox_failed_callback(DictionaryIterator *iterator, AppMessageResult reason, void *context) {
	APP_LOG(APP_LOG_LEVEL_ERROR, "Outbox send failed: %i - %s", reason, translate_error(reason));
	update_status(false);
	send_batch();
}
Example #12
static void
visit_directory (GFile *dir, SearchThreadData *data)
{
	GFileEnumerator *enumerator;
	GFileInfo *info;
	GFile *child;
	const char *mime_type, *display_name;
	char *lower_name, *normalized;
	gboolean hit;
	int i;
	GList *l;
	const char *id;
	gboolean visited;

	enumerator = g_file_enumerate_children (dir,
						data->mime_types != NULL ?
						STD_ATTRIBUTES ","
						G_FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE
						:
						STD_ATTRIBUTES
						,
						0, data->cancellable, NULL);
	
	if (enumerator == NULL) {
		return;
	}

	while ((info = g_file_enumerator_next_file (enumerator, data->cancellable, NULL)) != NULL) {
		if (g_file_info_get_is_hidden (info)) {
			goto next;
		}
		
		display_name = g_file_info_get_display_name (info);
		if (display_name == NULL) {
			goto next;
		}
		
		normalized = g_utf8_normalize (display_name, -1, G_NORMALIZE_NFD);
		lower_name = g_utf8_strdown (normalized, -1);
		g_free (normalized);
		
		hit = data->words_and;
		for (i = 0; data->words[i] != NULL; i++) {
			if (data->word_strstr[i]) {
				if ((strstr (lower_name, data->words[i]) != NULL)^data->words_and) {
					hit = !data->words_and;
					break;
				}
			}
			else if (strwildcardcmp (data->words[i], lower_name)^data->words_and) {
				hit = !data->words_and;
				break;
			}
		}
		g_free (lower_name);
		
		if (hit && data->mime_types) {
			mime_type = g_file_info_get_content_type (info);
			hit = FALSE;
			
			for (l = data->mime_types; mime_type != NULL && l != NULL; l = l->next) {
				if (g_content_type_equals (mime_type, l->data)) {
					hit = TRUE;
					break;
				}
			}
		}
		
		child = g_file_get_child (dir, g_file_info_get_name (info));
		
		if (hit) {
			data->uri_hits = g_list_prepend (data->uri_hits, g_file_get_uri (child));
		}
		
		data->n_processed_files++;
		if (data->n_processed_files > BATCH_SIZE) {
			send_batch (data);
		}

		if (g_file_info_get_file_type (info) == G_FILE_TYPE_DIRECTORY) {
			id = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_ID_FILE);
			visited = FALSE;
			if (id) {
				if (g_hash_table_lookup_extended (data->visited,
								  id, NULL, NULL)) {
					visited = TRUE;
				} else {
					g_hash_table_insert (data->visited, g_strdup (id), NULL);
				}
			}
			
			if (!visited) {
				g_queue_push_tail (data->directories, g_object_ref (child));
			}
		}
		
		g_object_unref (child);
	next:
		g_object_unref (info);
	}

	g_object_unref (enumerator);
}
Example #13
static void
send_batch(struct mnl_socket *nl, struct mnl_nlmsg_batch *b, int portid)
{
	int ret, fd = mnl_socket_get_fd(nl);
	size_t len = mnl_nlmsg_batch_size(b);
	char rcv_buf[MNL_SOCKET_BUFFER_SIZE];

	ret = mnl_socket_sendto(nl, mnl_nlmsg_batch_head(b), len);
	if (ret == -1) {
		perror("mnl_socket_recvfrom");
		exit(EXIT_FAILURE);
	}

	/* receive and digest all the acknowledgments from the kernel. */
	struct timeval tv = {
		.tv_sec		= 0,
		.tv_usec	= 0
	};
	fd_set readfds;
	FD_ZERO(&readfds);
	FD_SET(fd, &readfds);

	ret = select(fd+1, &readfds, NULL, NULL, &tv);
	if (ret == -1) {
		perror("select");
		exit(EXIT_FAILURE);
	}
	while (ret > 0 && FD_ISSET(fd, &readfds)) {
		ret = mnl_socket_recvfrom(nl, rcv_buf, sizeof(rcv_buf));
		if (ret == -1) {
			perror("mnl_socket_recvfrom");
			exit(EXIT_FAILURE);
		}

		ret = mnl_cb_run2(rcv_buf, ret, 0, portid,
				  NULL, NULL, cb_ctl_array,
				  MNL_ARRAY_SIZE(cb_ctl_array));
		if (ret == -1) {
			perror("mnl_cb_run");
			exit(EXIT_FAILURE);
		}

		ret = select(fd+1, &readfds, NULL, NULL, &tv);
		if (ret == -1) {
			perror("select");
			exit(EXIT_FAILURE);
		}
		FD_ZERO(&readfds);
		FD_SET(fd, &readfds);
	}
}

int main(void)
{
	struct mnl_socket *nl;
	char snd_buf[MNL_SOCKET_BUFFER_SIZE*2];
	struct mnl_nlmsg_batch *b;
	int j;
	unsigned int seq, portid;
	uint16_t i;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (nl == NULL) {
		perror("mnl_socket_open");
		exit(EXIT_FAILURE);
	}
	if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket_bind");
		exit(EXIT_FAILURE);
	}
	portid = mnl_socket_get_portid(nl);

	/* The buffer that we use to batch messages is twice
	 * MNL_SOCKET_BUFFER_SIZE long, but we limit the batch to half of it,
	 * since the last message that does not fit in the batch spills over
	 * the upper boundary; break this rule and expect memory corruption. */
	b = mnl_nlmsg_batch_start(snd_buf, MNL_SOCKET_BUFFER_SIZE);
	if (b == NULL) {
		perror("mnl_nlmsg_batch_start");
		exit(EXIT_FAILURE);
	}

	seq = time(NULL);
	for (i=1024, j=0; i<65535; i++, j++) {
		put_msg(mnl_nlmsg_batch_current(b), i, seq+j);

		/* is there room for more messages in this batch?
		 * if so, continue. */
		if (mnl_nlmsg_batch_next(b))
			continue;

		send_batch(nl, b, portid);

		/* this moves the last message that did not fit into the
		 * batch to the head of it. */
		mnl_nlmsg_batch_reset(b);
	}

	/* check if there is any message in the batch not sent yet. */
	if (!mnl_nlmsg_batch_is_empty(b))
		send_batch(nl, b, portid);

	mnl_nlmsg_batch_stop(b);
	mnl_socket_close(nl);

	return 0;
}
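Note: the put_msg() helper invoked in the loop above is not shown. A sketch of its likely shape, modeled on libmnl's conntrack batching example: it starts one netlink message at the batch's current position. mnl_nlmsg_put_header() and mnl_nlmsg_put_extra_header() are real libmnl calls; the message type, flags, and the omitted conntrack attributes are assumptions.

static void put_msg(void *buf, uint16_t i, int seq)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfh;

	/* Start a new netlink message at the batch's current position. */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	nlh->nlmsg_seq = seq;

	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
	nfh->nfgen_family = AF_INET;
	nfh->version = NFNETLINK_V0;
	nfh->res_id = 0;

	/* The real example adds the conntrack tuple attributes here,
	 * using `i` as the source port to make each entry unique. */
}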
Example #14
static int
search_visit_func (const char        *fpath,
		   const struct stat *sb,
		   int                typeflag,
		   struct FTW        *ftwbuf)
{
  SearchThreadData *data;
  gint i;
  const gchar *name;
  gchar *lower_name;
  gchar *uri;
  gboolean hit;
  gboolean is_hidden;

  data = (SearchThreadData*)g_private_get (&search_thread_data);

  if (data->cancelled)
#ifdef HAVE_GNU_FTW
    return FTW_STOP;
#else
    return 1;
#endif /* HAVE_GNU_FTW */

  name = strrchr (fpath, '/');
  if (name)
    name++;
  else
    name = fpath;

  is_hidden = *name == '.';
	
  hit = FALSE;
  
  if (!is_hidden) 
    {
      lower_name = g_ascii_strdown (name, -1);
      
      hit = TRUE;
      for (i = 0; data->words[i] != NULL; i++) 
	{
	  if (strstr (lower_name, data->words[i]) == NULL) 
	    {
	      hit = FALSE;
	      break;
	    }
	}
      g_free (lower_name);
    }

  if (hit) 
    {
      uri = g_filename_to_uri (fpath, NULL, NULL);
      data->uri_hits = g_list_prepend (data->uri_hits, uri);
    }

  data->n_processed_files++;
  
  if (data->n_processed_files > BATCH_SIZE)
    send_batch (data);

#ifdef HAVE_GNU_FTW
  if (is_hidden)
    return FTW_SKIP_SUBTREE;
  else
    return FTW_CONTINUE;
#else
  return 0;
#endif /* HAVE_GNU_FTW */
}