Example No. 1
inline HeapRegion* HeapRegionManager::at(uint index) const {
  assert(is_available(index), "pre-condition");
  HeapRegion* hr = _regions.get_by_index(index);
  assert(hr != NULL, "sanity");
  assert(hr->hrm_index() == index, "sanity");
  return hr;
}
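In this code base is_available(index) reports whether the heap region at that index has been committed; the bookkeeping itself is not part of the snippet. A minimal self-contained sketch of such a check, using a std::vector<bool> as a stand-in for the real availability bitmap (the class and field names below are illustrative assumptions, not the HotSpot implementation):

#include <vector>

// Sketch only: one flag per region, set when the region is committed.
struct RegionTableSketch {
  std::vector<bool> available;   // stand-in for the availability bitmap

  bool is_available(unsigned index) const {
    return index < available.size() && available[index];
  }
};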
Example No. 2
// --------------------------------------------
// matches
// --------------------------------------------
static bool matches(Tile* tiles, int x, int y, const Tile& t) {
	if (!is_available(tiles, x, y)) {
		return false;
	}
	Tile& other = tiles[x + y * MAX_X];
	return t.color == other.color;
}
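The bounds-and-occupancy test behind is_available(tiles, x, y) is not included in the snippet. A plausible stand-alone version, assuming a MAX_X by MAX_Y grid in which empty cells carry color == -1 (both assumptions for illustration):

// Hypothetical grid dimensions and Tile layout, only for illustration.
const int MAX_X = 10;
const int MAX_Y = 10;
struct Tile { int color; };   // color == -1 is assumed to mark an empty cell

static bool is_valid(int x, int y) {
	return x >= 0 && x < MAX_X && y >= 0 && y < MAX_Y;
}

// A tile is "available" when it lies inside the grid and holds a real color.
static bool is_available(Tile* tiles, int x, int y) {
	return is_valid(x, y) && tiles[x + y * MAX_X].color != -1;
}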
Example No. 3
static void
modem_removed (DBusGProxy *proxy, const char *path, gpointer user_data)
{
	GeoclueGsmlocMm *self = GEOCLUE_GSMLOC_MM (user_data);
	GeoclueGsmlocMmPrivate *priv = GEOCLUE_GSMLOC_MM_GET_PRIVATE (self);
	Modem *modem;

	modem = find_modem (self, path);
	if (modem) {
		gboolean old_available = is_available (self);

		priv->modems = g_slist_remove (priv->modems, modem);
		modem_free (modem);
		if (is_available (self) != old_available)
			g_object_notify (G_OBJECT (self), "available");
	}
}
Example No. 4
  HeapRegion* allocate_free_region(bool is_old) {
    HeapRegion* hr = _free_list.remove_region(is_old);

    if (hr != NULL) {
      assert(hr->next() == NULL, "Single region should not have next");
      assert(is_available(hr->hrm_index()), "Must be committed");
    }
    return hr;
  }
Example No. 5
void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}
Example No. 6
unsigned long measure_ram(void)
{
	int * pos = (int*)0x100000;
	do
	{
		pos += 0x1000;
	} while (is_available(pos));
	
	return (unsigned long)pos;
}
Example No. 7
cvm::atom_group *colvar::cvc::parse_group(std::string const &conf,
                                          char const *group_key,
                                          bool optional)
{
  cvm::atom_group *group = NULL;

  if (key_lookup(conf, group_key)) {
    group = new cvm::atom_group;
    group->key = group_key;

    if (b_try_scalable) {
      // TODO rewrite this logic in terms of dependencies
      if (is_available(f_cvc_scalable_com) && is_available(f_cvc_com_based)) {
        enable(f_cvc_scalable_com);
        enable(f_cvc_scalable);
        group->enable(f_ag_scalable_com);
        group->enable(f_ag_scalable);
      }

      // TODO check for other types of parallelism here

      if (is_enabled(f_cvc_scalable)) {
        cvm::log("Will enable scalable calculation for group \""+group->key+"\".\n");
      } else {
        cvm::log("Scalable calculation is not available for group \""+group->key+"\" with the current configuration.\n");
      }
    }

    if (group->parse(conf) == COLVARS_OK) {
      atom_groups.push_back(group);
    } else {
      cvm::error("Error parsing definition for atom group \""+
                         std::string(group_key)+"\".\n");
    }
  } else {
    if (! optional) {
      cvm::error("Error: definition for atom group \""+
                      std::string(group_key)+"\" not found.\n");
    }
  }
  return group;
}
Example No. 8
void get_colors(Tile* tiles, int col, int* colors) {
	for (int y = 0; y < MAX_Y; ++y) {
		if (is_available(tiles, col, y)) {
			Tile& t = tiles[get_tiles_index(col, y)];
			colors[y] = t.color;
		}
		else {
			colors[y] = -1;
		}
	}
}
Example No. 9
/* Tries to fill the cell (i, j) with the next available number.
Returns a flag to indicate if it succeeded. */
bool advance_cell(int i, int j)
{
    int n = clear_cell(i, j);
    while (++n <= 9) {
        if (is_available(i, j, n)) {
            set_cell(i, j, n);
            return true;
        }
    }
    return false;
}
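The snippet leaves is_available(i, j, n) to the rest of the program. A self-contained sketch of the usual Sudoku constraint test, assuming a global 9x9 board in which 0 means an empty cell (an assumed layout):

// Sketch only: cells[i][j] == 0 is assumed to mean "empty".
static int cells[9][9];

/* n may be placed at (i, j) if it does not already appear in row i,
column j, or the 3x3 box containing (i, j). */
bool is_available(int i, int j, int n)
{
    for (int k = 0; k < 9; k++) {
        if (cells[i][k] == n || cells[k][j] == n)
            return false;
    }
    int bi = (i / 3) * 3, bj = (j / 3) * 3;
    for (int r = bi; r < bi + 3; r++)
        for (int c = bj; c < bj + 3; c++)
            if (cells[r][c] == n)
                return false;
    return true;
}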
Example No. 10
Real ODERatelawCppCallback::deriv_func(
        state_container_type const &reactants_state_array,
        state_container_type const &products_state_array, 
        Real const volume, Real const t,
        ODEReactionRule const &rr)
{
    if (!is_available())
    {
        throw IllegalState("Callback function has not been registered");
    }
    return this->func_(reactants_state_array, products_state_array, volume, t, rr);
}
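Here is_available() evidently just reports whether a callback has been registered. A minimal sketch of that pattern using std::function; only the member name func_ comes from the snippet, everything else is illustrative:

#include <functional>
#include <stdexcept>

// Sketch: an object that becomes "available" once a callback is set.
struct CallbackHolderSketch {
    std::function<double(double)> func_;   // empty until registered

    bool is_available() const { return static_cast<bool>(func_); }

    double call(double x) const {
        if (!is_available())
            throw std::runtime_error("Callback function has not been registered");
        return func_(x);
    }
};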
Example No. 11
static void
get_property (GObject *object, guint prop_id,
              GValue *value, GParamSpec *pspec)
{
	switch (prop_id) {
	case PROP_AVAILABLE:
		g_value_set_boolean (value, is_available (GEOCLUE_GSMLOC_MM (object)));
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
		break;
	}
}
Example No. 12
int colvar::cvc::setup()
{
  size_t i;
  description = "cvc " + name;

  for (i = 0; i < atom_groups.size(); i++) {
    add_child((cvm::deps *) atom_groups[i]);
  }

  if (b_try_scalable && is_available(f_cvc_scalable)) {
    enable(f_cvc_scalable);
  }

  return COLVARS_OK;
}
Example No. 13
Real ODERatelawCythonCallback::deriv_func(
    state_container_type const &reactants_state_array,
    state_container_type const &products_state_array, 
    Real const volume, Real const t,
    ODEReactionRule const &rr)
{
    if (!is_available())
    {
        throw IllegalState("Callback function has not been registered");
    }
    ODEReactionRule rr_tempolrary(rr);
    return this->indirect_func_(
        this->python_func_, reactants_state_array, products_state_array, 
        volume, t, &rr_tempolrary);
}
Example No. 14
void Peer::ips_changed (const char *remote, const char *local)
{
    if (g_strcmp0 (remote, remote_host_.c_str()) == 0 &&
        g_strcmp0 (local, local_host_.c_str()) == 0)
        return;

    auto was_available = is_available();

    if (g_strcmp0 (remote, "0.0.0.0") == 0)
        remote_host_.clear();
    else
        remote_host_ = std::string(remote);

    if (g_strcmp0 (local, "0.0.0.0") == 0)
        local_host_.clear();
    else
        local_host_ = std::string(local);

    if (!observer_)
        return;

    if (was_available != is_available())
        observer_->on_availability_changed(this);
}
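The peer is presumably "available" once both endpoints are known, which is consistent with ips_changed() clearing a host string when it sees 0.0.0.0. A sketch of such a check (the surrounding class is illustrative; only the member names mirror the snippet):

#include <string>

// Sketch: availability as "both hosts known".
struct PeerSketch {
    std::string remote_host_;
    std::string local_host_;

    bool is_available() const {
        return !remote_host_.empty() && !local_host_.empty();
    }
};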
Example No. 15
void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->doHeapRegion(at(i));
    if (res) {
      blk->incomplete();
      return;
    }
  }
}
Example No. 16
void solve(int count){
    
    if(count == N){
        ans += 1;
        return;
    }
    
    for(int i=0; i<N; i++){
        if(is_available(count, i)){
            field[count][i] = 1;
            solve(count + 1);
            field[count][i] = 0;
        }
    }
}
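The backtracking driver relies on is_available(row, col) to enforce the N-queens constraints. A self-contained sketch over the same kind of field array (N and field are globals in the original; the definitions below are assumptions for illustration):

// Sketch of the safety test used by solve(): no queen already placed in the
// same column or on either diagonal in the rows above `row`.
const int N = 8;                // board size (assumed)
static int field[N][N];         // 1 = queen placed, 0 = empty (assumed)

static bool is_available(int row, int col) {
    for (int r = 0; r < row; r++) {
        if (field[r][col]) return false;                      // same column
        int d = row - r;
        if (col - d >= 0 && field[r][col - d]) return false;  // left diagonal
        if (col + d < N  && field[r][col + d]) return false;  // right diagonal
    }
    return true;
}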
Example No. 17
void Shop::ShopItem::choose(squadst& customers, int& buyer) const
{
   if (is_available())
   {
      ledger.subtract_funds(adjusted_price(), EXPENSE_SHOPPING);
      switch (itemclass_)
      {
         case WEAPON:
         {
            Weapon* i = new Weapon(*weapontype[getweapontype(itemtypename_)]);
            customers.squad[buyer]->give_weapon(*i, &location[customers.squad[0]->base]->loot);
            if (i->empty())
               delete i;
            else
               location[customers.squad[0]->base]->loot.push_back(i);
            break;
         }
         case CLIP:
         {
            Clip* i = new Clip(*cliptype[getcliptype(itemtypename_)]);
            customers.squad[buyer]->take_clips(*i, 1);
            if (i->empty())
               delete i;
            else
               location[customers.squad[0]->base]->loot.push_back(i);
            break;
         }
         case ARMOR:
         {
            Armor* i = new Armor(*armortype[getarmortype(itemtypename_)]);
            customers.squad[buyer]->give_armor(*i, &location[customers.squad[0]->base]->loot);
            if (i->empty())
               delete i;
            else
               location[customers.squad[0]->base]->loot.push_back(i);
            break;
         }
         case LOOT:
         {
            Loot* i = new Loot(*loottype[getloottype(itemtypename_)]);
            location[customers.squad[0]->base]->loot.push_back(i);
            break;
         }
      }
   }
}
Example No. 18
static void
kill_modems (GeoclueGsmlocMm *self)
{
	GeoclueGsmlocMmPrivate *priv = GEOCLUE_GSMLOC_MM_GET_PRIVATE (self);
	gboolean old_available = is_available (self);
	GSList *iter;

	/* Kill all modems */
	for (iter = priv->modems; iter; iter = g_slist_next (iter))
		modem_free ((Modem *) iter->data);
	g_slist_free (priv->modems);
	priv->modems = NULL;

	/* No more modems; clearly location is no longer available */
	if (old_available)
		g_object_notify (G_OBJECT (self), "available");
}
Example No. 19
// -------------------------------------------------------
// determine edges
//   8
// 1    2
//   4
// -------------------------------------------------------
static int determineEdge(Tile* tiles, int x, int y, const Tile& t) {
	int set = 0;
	if (is_available(tiles, x, y)) {
		if (matches(tiles, x - 1, y, t)) {
			set |= 1;
		}
		if (matches(tiles, x + 1, y, t)) {
			set |= 2;
		}
		if (matches(tiles, x, y - 1, t)) {
			set |= 4;
		}
		if (matches(tiles, x, y + 1, t)) {
			set |= 8;
		}
	}
	return set;
}
Example No. 20
void compare_permutations(latin_grid square1, latin_grid square2) {
  // search the space of permutations of the rows of the second square
  // stop if the current pair is worse in every metric than a pair we've found
  
  int i;
  for (i = 0; i < square1->size; i++) {
    //printf("row %d available? %d (%d)\n", i, is_available(i), rows_used);
    if (is_available(i)) {
      //printf("choosing row %d for row %d\n", i, current_row);
      choose_row(i, square2);
      bool bad_metrics = check_metrics(square1);
      //printf("bm:%d incomp:%d both: %d\n", (! bad_metrics), current_row < square1->size, (! bad_metrics) && current_row < square1->size);
      if ((! bad_metrics) && current_row < square1->size) {
        //printf("recurse\n");
        compare_permutations(square1, square2);
      }
      unchoose_row(i, square2);
    }
  }
}
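The commented-out trace mentions rows_used, which suggests is_available(i) is a bitmask test over the rows already chosen. A minimal sketch under that assumption (names other than rows_used are illustrative):

// Sketch: rows_used as a bitmask of already-chosen rows (assumed from the
// debug trace above). Row i is available while its bit is still clear.
static unsigned int rows_used = 0;

static bool is_available(int i) {
    return (rows_used & (1u << i)) == 0;
}

// choose_row()/unchoose_row() would set and clear the corresponding bit.
static void mark_row(int i, bool used) {
    if (used) rows_used |= (1u << i);
    else      rows_used &= ~(1u << i);
}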
Example No. 21
double timer_query::time_in_ms(bool wait, double timeout_ms) 
{
  if (wait) {
    auto start = std::chrono::system_clock::now();

    while (!is_available())
    {
      auto end = std::chrono::system_clock::now();
      auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
      if (elapsed.count() > timeout_ms) {
        BOOST_LOG_TRIVIAL(info) << "timer_query::time_in_ms(): Time out reached.";
        _result_fetched = true;
        return 0.0;
      } 
    }
  }
  GLuint64 result = 0;
  glGetQueryObjectui64v(id(), GL_QUERY_RESULT, &result);
  _result_fetched = true;
  return double(result) / double(1000000);
}
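For OpenGL timer queries, availability is normally polled with GL_QUERY_RESULT_AVAILABLE, so is_available() is most likely a thin wrapper around that query. A sketch of the non-blocking poll (the GL calls are standard; how the query id is stored is an assumption):

#include <GL/glew.h>   // or any other loader that exposes glGetQueryObjectiv

// Sketch: ask the driver whether the timer result can be fetched without
// blocking; GL_QUERY_RESULT_AVAILABLE returns GL_TRUE once it is ready.
static bool query_result_available(GLuint query_id)
{
    GLint available = GL_FALSE;
    glGetQueryObjectiv(query_id, GL_QUERY_RESULT_AVAILABLE, &available);
    return available == GL_TRUE;
}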
Example No. 22
void devide_and_conquer(int paper[][128], int x, int y, int size) {
    int half_size = size / 2;
    if(size != 1) {
        if(!is_available(paper, x, y, size)) {
            devide_and_conquer(paper, x, y, half_size);
            devide_and_conquer(paper, x + half_size, y, half_size);
            devide_and_conquer(paper, x, y + half_size, half_size);
            devide_and_conquer(paper, x + half_size, y + half_size, half_size);
            return ;
        }
    }
    
    switch(paper[x][y]) {
        case 1:
            pink_count++;
            break;
        case 0:
            wite_count++;
            break;
    }
}
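In this quad-tree style counter, is_available(paper, x, y, size) evidently means "this size-by-size block is a single color and need not be subdivided". A sketch of that uniformity test over the same 128x128 array (illustrative, not the original helper):

// Sketch: a block is "available" when every cell in the size x size square
// starting at (x, y) holds the same value as its top-left cell.
static bool is_available(int paper[][128], int x, int y, int size)
{
    int first = paper[x][y];
    for (int i = x; i < x + size; i++)
        for (int j = y; j < y + size; j++)
            if (paper[i][j] != first)
                return false;
    return true;
}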
Example No. 23
void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not put into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}
Example No. 24
/**
 * nm_supplicant_manager_create_interface:
 * @self: the #NMSupplicantManager
 * @ifname: the interface for which to obtain the supplicant interface
 * @is_wireless: whether the interface is supposed to be wireless.
 *
 * Note: the manager owns a reference to the instance and the only way to
 *   get the manager to release it, is by dropping all other references
 *   to the supplicant-interface (or destroying the manager).
 *
 * Returns: (transfer full): a #NMSupplicantInterface or %NULL.
 *   Must be unrefed at the end.
 * */
NMSupplicantInterface *
nm_supplicant_manager_create_interface (NMSupplicantManager *self,
                                        const char *ifname,
                                        gboolean is_wireless)
{
	NMSupplicantManagerPrivate *priv;
	NMSupplicantInterface *iface;
	GSList *ifaces;

	g_return_val_if_fail (NM_IS_SUPPLICANT_MANAGER (self), NULL);
	g_return_val_if_fail (ifname != NULL, NULL);

	priv = NM_SUPPLICANT_MANAGER_GET_PRIVATE (self);

	nm_log_dbg (LOGD_SUPPLICANT, "(%s): creating new supplicant interface", ifname);

	/* Ensure we are not asked for a duplicate interface. */
	for (ifaces = priv->ifaces; ifaces; ifaces = ifaces->next) {
		if (g_strcmp0 (nm_supplicant_interface_get_ifname (ifaces->data), ifname) == 0)
			g_return_val_if_reached (NULL);
	}

	iface = nm_supplicant_interface_new (ifname,
	                                     is_wireless,
	                                     priv->fast_supported,
	                                     priv->ap_support);

	priv->ifaces = g_slist_prepend (priv->ifaces, iface);
	g_object_add_toggle_ref ((GObject *) iface, _sup_iface_last_ref, self);

	/* If we're making the supplicant take a time out for a bit, don't
	 * let the supplicant interface start immediately, just let it hang
	 * around in INIT state until we're ready to talk to the supplicant
	 * again.
	 */
	if (is_available (self))
		nm_supplicant_interface_set_supplicant_available (iface, TRUE);

	return iface;
}
Example No. 25
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1);
    }
    HeapRegion* curr_region  = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}
Example No. 26
static void *
find_range(sel4utils_alloc_data_t *data, size_t num_pages, size_t size_bits)
{
    /* look for a contiguous range that is free.
     * We use first-fit with the optimisation that we store
     * a pointer to the last thing we freed/allocated */
    size_t contiguous = 0;
    uintptr_t start = ALIGN_UP(data->last_allocated, SIZE_BITS_TO_BYTES(size_bits));
    uintptr_t current = start;

    assert(IS_ALIGNED(start, size_bits));
    while (contiguous < num_pages) {

        bool available = is_available(data->top_level, current, size_bits);
        current += SIZE_BITS_TO_BYTES(size_bits);

        if (available) {
            /* keep going! */
            contiguous++;
        } else {
            /* reset start and try again */
            start = current;
            contiguous = 0;
        }

        if (current >= KERNEL_RESERVED_START) {
            ZF_LOGE("Out of virtual memory");
            return NULL;
        }

    }

    data->last_allocated = current;

    return (void *) start;
}
Example No. 27
// -------------------------------------------------------------
// check recursively to detect matching pieces
// -------------------------------------------------------------
static void check(Tile* tiles, int xp, int yp, int lastDir, PointList& list, bool rec) {
	if (is_valid(xp, yp)) {
		Tile& t = tiles[get_tiles_index(xp, yp)];
		int color = t.color;
		if (color != -1) {
			for (int i = 0; i < 4; ++i) {
				if (i != lastDir) {
					int sx = xp + XM[i];
					int sy = yp + YM[i];
					if (is_available(tiles, sx, sy)) {
						Tile& nt = tiles[get_tiles_index(sx, sy)];
						int nc = nt.color;
						if (nc != -1) {
							while (color == nc && color != -1) {
								bool recheck = !list.contains(sx, sy);
								list.add(sx, sy);
								if (recheck && rec) {
									check(tiles, sx, sy, LD[i], list, rec);
								}
								sx += XM[i];
								sy += YM[i];
								if (is_valid(sx, sy)) {
									Tile& npe = tiles[get_tiles_index(sx, sy)];
									nc = npe.color;
								}
								else {
									nc = -1;
								}
							}
						}
					}
				}
			}
		}
	}
}
Example No. 28
//'usevec': scratch vector for local use.
bool IR_CP::doProp(IN IRBB * bb, Vector<IR*> & usevec)
{
    bool change = false;
    C<IR*> * cur_iter, * next_iter;

    for (BB_irlist(bb).get_head(&cur_iter),
         next_iter = cur_iter; cur_iter != NULL; cur_iter = next_iter) {

        IR * def_stmt = cur_iter->val();

        BB_irlist(bb).get_next(&next_iter);

        if (!is_copy(def_stmt)) { continue; }

        DUSet const* useset = NULL;
        UINT num_of_use = 0;
        SSAInfo * ssainfo = NULL;
        bool ssadu = false;
        if ((ssainfo = def_stmt->get_ssainfo()) != NULL &&
            SSA_uses(ssainfo).get_elem_count() != 0) {
            //Record each use in a separate vector so this function can still
            //visit it even if it is removed from the use-list by copy-propagation.
            SEGIter * sc;
            for (INT u = SSA_uses(ssainfo).get_first(&sc);
                 u >= 0; u = SSA_uses(ssainfo).get_next(u, &sc)) {
                IR * use = m_ru->get_ir(u);
                ASSERT0(use);
                usevec.set(num_of_use, use);
                num_of_use++;
            }
            ssadu = true;
        } else if (def_stmt->get_exact_ref() == NULL &&
                   !def_stmt->is_void()) {
            //Only exact or VOID values may be copy propagated.
            continue;
        } else if ((useset = def_stmt->readDUSet()) != NULL &&
                   useset->get_elem_count() != 0) {
            //Record each use in a separate vector so this function can still
            //visit it even if it is removed from the use-list by copy-propagation.
            DUIter di = NULL;
            for (INT u = useset->get_first(&di);
                 u >= 0; u = useset->get_next(u, &di)) {
                IR * use = m_ru->get_ir(u);
                usevec.set(num_of_use, use);
                num_of_use++;
            }
        } else  {
            continue;
        }

        IR const* prop_value = get_propagated_value(def_stmt);

        for (UINT i = 0; i < num_of_use; i++) {
            IR * use = usevec.get(i);
            ASSERT0(use->is_exp());
            IR * use_stmt = use->get_stmt();
            ASSERT0(use_stmt->is_stmt());

            ASSERT0(use_stmt->get_bb() != NULL);
            IRBB * use_bb = use_stmt->get_bb();
            if (!ssadu &&
                !(bb == use_bb && bb->is_dom(def_stmt, use_stmt, true)) &&
                !m_cfg->is_dom(BB_id(bb), BB_id(use_bb))) {
                //'def_stmt' must dominate 'use_stmt'.
                //e.g:
                //    if (...) {
                //        g = 10; //S1
                //    }
                //    ... = g; //S2
                //g cannot be propagated since S1 does not dominate S2.
                continue;
            }

            if (!is_available(def_stmt, prop_value, use_stmt)) {
                //The value being propagated must not be killed
                //between 'def_stmt' and 'use_stmt'.
                //e.g:
                //    g = a; //S1
                //    if (...) {
                //        a = ...; //S3
                //    }
                //    ... = g; //S2
                //g cannot be propagated since a is killed by S3.
                continue;
            }

            if (!ssadu && !m_du->isExactAndUniqueDef(def_stmt, use)) {
                //Only single definition is allowed.
                //e.g:
                //    g = 20; //S3
                //    if (...) {
                //        g = 10; //S1
                //    }
                //    ... = g; //S2
                //g cannot be propagated since more than one
                //definition can reach S2.
                continue;
            }

            if (!canBeCandidate(prop_value)) {
                continue;
            }

            CPCtx lchange;
            IR * old_use_stmt = use_stmt;

            replaceExp(use, prop_value, lchange, ssadu);

            ASSERT(use_stmt && use_stmt->is_stmt(),
                    ("ensure use_stmt still legal"));
            change |= CPC_change(lchange);

            if (!CPC_change(lchange)) { continue; }

            //Indicate whether use_stmt is the next stmt of def_stmt.
            bool is_next = false;
            if (next_iter != NULL && use_stmt == next_iter->val()) {
                is_next = true;
            }

            RefineCtx rf;
            use_stmt = m_ru->refineIR(use_stmt, change, rf);
            if (use_stmt == NULL && is_next) {
                //use_stmt has been optimized and removed by refineIR().
                next_iter = cur_iter;
                BB_irlist(bb).get_next(&next_iter);
            }

            if (use_stmt != NULL && use_stmt != old_use_stmt) {
                //use_stmt has been removed and new stmt generated.
                ASSERT(old_use_stmt->is_undef(), ("the old one should be freed"));

                C<IR*> * irct = NULL;
                BB_irlist(use_bb).find(old_use_stmt, &irct);
                ASSERT0(irct);
                BB_irlist(use_bb).insert_before(use_stmt, irct);
                BB_irlist(use_bb).remove(irct);
            }
        } //end for each USE
    } //end for IR
    return change;
}
Example No. 29
 bool can_fit_vehicle(Vehicle *vehicle) {
     return is_available() && vehicle->can_fit_in_spot(shared_from_this());
 }
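For the parking-spot snippet, is_available() presumably reports an empty spot. A sketch of the surrounding class showing only what that check needs (the member name is an assumption):

#include <memory>

class Vehicle;   // forward declaration; the real class is defined elsewhere

// Sketch: a spot is available while no vehicle occupies it.
class ParkingSpotSketch {
public:
    bool is_available() const { return parked_vehicle_ == nullptr; }

private:
    std::shared_ptr<Vehicle> parked_vehicle_;   // assumed member
};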
Example No. 30
/*
 * Convert a raw index into a platform + device number.
 */
struct resource_alloc utility::index_to_alloc(size_t index)
{
	struct resource_alloc alloc = {
		.platform = 0,
		.device = 0,
		.compute_units = 0
	};
	if(index < 0 || queues.size() <= index) CHECK_ERR(ALLOC_ERR);
	alloc.platform = queues[index]->platform();
	alloc.device = queues[index]->device();
	alloc.compute_units = queues[index]->computeUnits();
	return alloc;
}

/*
 * For job 'j' in queue 'q', get prediction for other queue 'q_to_predict'
 */
float utility::get_prediction(size_t q, size_t j, size_t q_to_predict)
{
	// Find prediction slot
	for(size_t i = 0; i < prediction_slots; i++) {
		if(queues[q_to_predict]->platform() == system_devices[i].platform &&
			 queues[q_to_predict]->device() == system_devices[i].device &&
			 queues[q_to_predict]->computeUnits() == system_devices[i].compute_units) {
			return queues[q]->queued(j)->predictions[i];
		}
	}
	CHECK_ERR(FAILURE); // Couldn't find the prediction
	return -1.0;
}

/*
 * Find candidate HW queues, sorted from best to worst.
 */
std::vector<size_t> utility::get_candidates(Job* job)
{
	// Get the best (i.e. preferred) architecture
	std::pair<size_t, float> best(0, -1000.0f);
	for(size_t i = 0; i < job->predictions.size(); i++) {
		std::pair<size_t, float> cur(i, job->predictions[i]);
		if(is_available(i) && utility::better_candidate(best, cur))
			best = cur;
	}

	// Find candidate prediction slots
	std::vector<std::pair<size_t, float> > candidates;
	candidates.push_back(best);
	for(size_t i = 0; i < job->predictions.size(); i++)
		if(i != best.first && is_available(i) &&
			 utility::within_threshold(best.second, job->predictions[i]))
			candidates.push_back(std::pair<size_t, float>(i, job->predictions[i]));
	std::sort(candidates.begin(), candidates.end(), utility::better_candidate);

	// Convert prediction slots into HW queues & return candidates
	std::vector<size_t> queues;
	for(std::pair<size_t, float> pair : candidates)
	{
		struct resource_alloc tmp = system_devices[pair.first];
		std::vector<size_t> tmp_queues = utility::alloc_to_index(tmp);
		for(size_t i : tmp_queues)
			queues.push_back(i);
	}
	return queues;
}