void testbed_controller_cb (void *cls, const struct GNUNET_TESTBED_EventInformation *event)
{
  static int connections = 0;

  switch (event->type)
  {
    case GNUNET_TESTBED_ET_OPERATION_FINISHED:
      /* This case is still triggered when
	 GNUNET_TESTBED_peer_get_information() succeeds; however, the
	 relevant logic now lives in the operation completion callback. */
      break;
    case GNUNET_TESTBED_ET_CONNECT:
      connections++;
      if (connections == 3)
      {
        GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "All peers connected\n");
        GNUNET_TESTBED_operation_done (topology_op);
        topology_op = NULL;
        all_connected ();
      }
      break;
    default:
      /* ignore other events */
      break;
  }
}
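/* A minimal sketch of how this callback might be wired up, assuming the
 * standard GNUnet testbed test driver; NUM_PEERS, "template.conf" and
 * test_master are assumed to be defined by the surrounding test. The
 * event mask must include every event type the callback handles. */
static void
run_testbed (void)
{
  uint64_t event_mask;

  event_mask = 0;
  event_mask |= (1LL << GNUNET_TESTBED_ET_CONNECT);
  event_mask |= (1LL << GNUNET_TESTBED_ET_OPERATION_FINISHED);
  (void) GNUNET_TESTBED_test_run ("testbed-example",
                                  "template.conf",
                                  NUM_PEERS,
                                  event_mask,
                                  &testbed_controller_cb, NULL,
                                  &test_master, NULL);
}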
/**
*
*	Set/update cache information for the given display
*	path subset. This function does nothing
*	if this subset is currently not mapped in the cache
*	(i.e. we're not caching this particular subset).
*
*	\param [in] tm_subsets_cache:  the subset cache to update
*	\param [in] displays:  array of display paths
*	\param [in] array_size:  size of the above array
*	\param [in] supported:  true if the display paths in this
*	subset can all be enabled at the same time, false otherwise
*
*/
void dal_set_subset_supported(
	struct tm_subsets_cache *tm_subsets_cache,
	const uint32_t *displays,
	uint32_t array_size,
	bool supported)
{
	uint32_t index;
	uint32_t word_num;
	uint32_t bit_mask;

	ASSERT(tm_subsets_cache->cofunc_cache != NULL);
	ASSERT(displays != NULL);
	if (tm_subsets_cache->cofunc_cache == NULL ||
		displays == NULL) {
		ASSERT_CRITICAL(0);
		return;
	}

	if (array_size == 1) {
		/* only one display path, so check only displays[0] */
		if (displays[0] >= tm_subsets_cache->num_display_paths)
			return;

		tm_utils_set_bit(
			&tm_subsets_cache->cofunc_cache_single_valid,
			displays[0]);

		if (supported)
			tm_utils_set_bit(
				&tm_subsets_cache->cofunc_cache_single,
				displays[0]);

		return;
	}

	/* array_size > 1 */
	if (all_connected(tm_subsets_cache, displays, array_size) &&
		array_size == tm_subsets_cache->num_connected)
		tm_subsets_cache->all_connected_supported =
			supported ? CQR_SUPPORTED : CQR_NOT_SUPPORTED;

	if (!is_dp_mapping_valid(tm_subsets_cache, displays, array_size))
		/* this case should not really happen as TM
		 * should not call SetSubsetSupported if mapping not valid
		 */
		return;

	index = find_index(tm_subsets_cache, displays, array_size);
	if (index > tm_subsets_cache->max_num_combinations) {
		/* this should not happen*/
		BREAK_TO_DEBUGGER();
		return;
	}

	/* If we have index K, we want to modify bits
	 * 2K and 2K+1 in the cache. Since the cache is
	 * internally represented as a uint32_t array,
	 * we first find the array element (word) that
	 * holds those bits. One element has
	 * sizeof(uint32_t)*8 bits, so the 2K'th bit is
	 * contained in the array element at location
	 * word_num = (2K) / (sizeof(uint32_t)*8) = K / (sizeof(uint32_t)*4).
	 * The shift within tm_subsets_cache->cofunc_cache[word_num]
	 * is the remainder of the above division, multiplied by 2.
	 * Note that the 2 bits directly encode the enum:
	 * 0x0 = Unknown, 0x1 = NotSupported, 0x2 = Supported.
	 * I.e. it's not that one bit is for valid or not,
	 * and the other for supported or not.
	 */
	/* *4 instead of *8 since every subset uses 2 bits */
	word_num = index / (sizeof(uint32_t)*4);
	bit_mask = supported ? 0x2 : 0x1;
	/* now move it to the right location within those 32 bits */
	bit_mask = bit_mask << ((index % (sizeof(uint32_t)*4)) * 2);

	*(tm_subsets_cache->cofunc_cache + word_num) |= bit_mask;
}
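/* A worked example of the 2-bit packing arithmetic above, guarded by an
 * assumed SUBSET_CACHE_EXAMPLE macro so it never builds into the driver:
 * for index K = 21, word_num = 21 / 16 = 1 and the shift is
 * (21 % 16) * 2 = 10, so marking the subset "supported" ORs
 * 0x2 << 10 = 0x800 into cofunc_cache[1]. */
#ifdef SUBSET_CACHE_EXAMPLE
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t index = 21;
	/* 16 two-bit fields fit in one 32-bit word */
	uint32_t word_num = index / (sizeof(uint32_t) * 4);
	uint32_t shift = (index % (sizeof(uint32_t) * 4)) * 2;

	/* prints: word_num=1 shift=10 mask=0x800 */
	printf("word_num=%u shift=%u mask=0x%x\n",
		(unsigned)word_num, (unsigned)shift, 0x2u << shift);
	return 0;
}
#endif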
/**
*	Check whether the given subset of display
*	paths is supported, i.e. whether the display paths
*	can be enabled at the same time.
*
*	\param [in] tm_subsets_cache: the subset cache to query
*	\param [in] displays: array of display paths for
*	which we will check whether they can be
*	enabled at the same time
*	\param [in] array_size: size of the above array
*
*	\return
*	CacheQueryResult enum:
*	Supported - the given subset is supported (cache hit)
*
*	NotSupported - the given subset is not supported (cache hit)
*
*	Unknown - this display path subset is currently mapped
*	in the cache, but this is the first query,
*	so it is not known whether
*	it's supported or not.
*	The caller must do a noncached lookup
*	and update the cache via
*	SetSubsetSupported() (cache miss)
*
*	DPMappingNotValid - this display path subset is currently
*	not being cached. The caller must
*	do a noncached lookup and not
*	attempt to update the cache, since the
*	update would fail (cache miss)
*
*/
enum cache_query_result dal_is_subset_supported(
	struct tm_subsets_cache *tm_subsets_cache,
	const uint32_t *displays,
	uint32_t array_size)
{
	uint32_t index;
	uint32_t word_num;
	uint32_t bit_mask;
	uint32_t ret;
	struct dal_context *dal_context = tm_subsets_cache->dal_context;

	ASSERT(displays != NULL);

	if (tm_subsets_cache->cofunc_cache == NULL ||
		displays == NULL) {
		ASSERT_CRITICAL(0);
		return CQR_DP_MAPPING_NOT_VALID;
	}

	if (array_size == 1) {
		/* mapping is always valid for size == 1 */
		ASSERT(displays[0] < tm_subsets_cache->num_display_paths);

		if (!tm_utils_test_bit(
			&tm_subsets_cache->cofunc_cache_single_valid,
			displays[0]))
			return CQR_UNKNOWN;

		if (tm_utils_test_bit(
			&tm_subsets_cache->cofunc_cache_single,
			displays[0]))
			return CQR_SUPPORTED;
		else
			return CQR_NOT_SUPPORTED;
	}

	/* check if this is a query for all connected
	 * (enabled) ones, which is the most common query observed
	 */
	if (array_size <= tm_subsets_cache->num_connected &&
		array_size <= tm_subsets_cache->max_num_cofunc_targets &&
		tm_subsets_cache->all_connected_supported != CQR_UNKNOWN) {

		if (all_connected(tm_subsets_cache, displays, array_size)) {
			if (tm_subsets_cache->all_connected_supported ==
				CQR_SUPPORTED)
				return CQR_SUPPORTED;
			/* if all connected are not supported, and the subset
			 * is smaller, it could be that it's supported,
			 * in that case we don't return here
			 */
			else if (array_size ==
				tm_subsets_cache->num_connected)
				return CQR_NOT_SUPPORTED;
		}
	}

	/* array_size > 1 */
	/* asking for a disconnected one with array_size > 1?
	 * the caller should do a noncached lookup
	 * and return the result, but not update the cache
	 */
	if (!is_dp_mapping_valid(tm_subsets_cache, displays, array_size))
		return CQR_DP_MAPPING_NOT_VALID;

	index = find_index(tm_subsets_cache, displays, array_size);
	if (index > tm_subsets_cache->max_num_combinations) {
		if (array_size > tm_subsets_cache->max_num_cofunc_targets)
			return CQR_NOT_SUPPORTED;

		/* this should not happen, fall back
		 * to noncached lookup without updating cache
		 */
		TM_ERROR("%s: Invalid index", __func__);
		return CQR_DP_MAPPING_NOT_VALID;
	}

	/* If we have index K, we want to read bits
	 * 2K and 2K+1 in the cache. Since the cache is
	 * internally represented as a uint32_t array,
	 * we first find the array element (word) that
	 * holds those bits. One element has
	 * sizeof(uint32_t)*8 bits, so the 2K'th bit is
	 * contained in the array element at location
	 * word_num = (2K) / (sizeof(uint32_t)*8) = K / (sizeof(uint32_t)*4).
	 * The shift within tm_subsets_cache->cofunc_cache[word_num]
	 * is the remainder of the above division, multiplied by 2.
	 * Note that the 2 bits directly encode the enum:
	 * 0x0 = Unknown, 0x1 = NotSupported, 0x2 = Supported.
	 * I.e. it's not that one bit is for valid or not,
	 * and the other for supported or not.
	 */

	/* *4 instead of *8 since every subset uses 2 bits */
	word_num = index / (sizeof(uint32_t)*4);
	bit_mask = 0x3 << ((index % (sizeof(uint32_t)*4)) * 2);

	ret = (*(tm_subsets_cache->cofunc_cache +
		word_num) & bit_mask) >> ((index % (sizeof(uint32_t)*4)) * 2);
	return (enum cache_query_result)(ret);
}
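/* A hypothetical caller following the contract documented above;
 * noncached_subset_lookup() is an assumed helper, not a real DAL
 * function, standing in for the expensive noncached query. */
static bool query_subset_supported(
	struct tm_subsets_cache *tm_subsets_cache,
	const uint32_t *displays,
	uint32_t array_size)
{
	bool supported;

	switch (dal_is_subset_supported(tm_subsets_cache, displays,
			array_size)) {
	case CQR_SUPPORTED:
		return true;
	case CQR_NOT_SUPPORTED:
		return false;
	case CQR_UNKNOWN:
		/* cache miss: do the noncached lookup, then update cache */
		supported = noncached_subset_lookup(displays, array_size);
		dal_set_subset_supported(tm_subsets_cache, displays,
			array_size, supported);
		return supported;
	case CQR_DP_MAPPING_NOT_VALID:
	default:
		/* subset not cached: look up, but do NOT update the cache */
		return noncached_subset_lookup(displays, array_size);
	}
}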