// v is the current vertex
// l is the number of steps remained to do
// backtracking, computes the number of possible paths of length l from v to dst
// Accumulates into the global `count`; uses globals dst (target vertex),
// visited (per-vertex flag array) and graph (mutable adjacency lists).
// The graph and visited[] are restored to their input state before returning.
static void search(vertex_t v, int l){
    // global dst, count, visited, graph
    list_t X, Y;  // Y: unvisited neighbors of v; X: neighbors whose edge back to v we removed
    register int i;
    register vertex_t u;

    // NOTE(review): X and Y are never explicitly freed; assumes list__init
    // does not heap-allocate (or that list_t needs no teardown) — confirm.
    list__init(&X);
    list__init(&Y);
    // Collect every not-yet-visited neighbor of v into Y.
    for (i = graph.adjs[v].len-1; i >= 0; i--){
        u = graph.adjs[v].elems[i];
        if (!visited[u]){
            list__append(&Y, u);
        }
    }
    // remove all the edges to v
    // (pruning: while v is on the current path, no extension may re-enter it)
    for (i = Y.len-1; i >= 0; i--){
        u = Y.elems[i];
        if (list__contains(&graph.adjs[u], v)){
            if (graph.adjs[u].len <= 1){
                // u is isolated, rollback
                // (u's only remaining edge led to v, so no simple path
                //  through u can exist — undo the removals done so far).
                // NOTE: reusing loop variable `i` here is safe only
                // because we return immediately after this inner loop.
                for (i = X.len-1; i >= 0; i--){
                    list__append(&graph.adjs[X.elems[i]], v);
                }
                return;
            }
            // updating the graph
            list__remove(&graph.adjs[u], v);
            // save, for restoring it later
            list__append(&X, u);
        }
    }
    visited[v] = TRUE;
    // check that dst is reachable from all the vertices of X
    // (connectivity pruning: if some touched neighbor can no longer reach
    //  dst, the current partial path can never be completed)
    if (X.len == 0 || connected(&graph, X, dst)){
        // one step less is remained to dst
        l -= 1;
        // go over all possible steps that can be done
        // i.e., check every unvisited adjacent vertex
        for (i = Y.len-1; i >= 0; i--){
            u = Y.elems[i];
            if (l > 0 && u != dst){
                // not yet reached dst, let's check the step
                // (dst is deliberately not recursed into: it may only
                //  appear as the final vertex of a path)
                search(u, l);
            } else if (l == 0 && u == dst){
                // we reached dst and passed all vertices
                count += 1;
                //if (count % 1000 == 0){
                //    printf("%d\n", count);
                //}
            }
        }
    }
    visited[v] = FALSE;

    // restore the graph
    for (i = X.len-1; i >= 0; i--){
        list__append(&graph.adjs[X.elems[i]], v);
    }
}
Example #2
0
/* manager__spawn_worker
 * The initialization point for a worker to start doing real work for a manager.
 * Loops until mgr->runnable becomes 0, then merges this worker's statistics
 * into the manager (under mgr->lock), drops any remaining list pin, and
 * terminates the calling thread via pthread_exit().
 */
void manager__spawn_worker(Manager *mgr) {
  /* Set up our worker. */
  workerid_t id = MAX_WORKER_ID;
  manager__assign_worker_id(&id);
  if(id == MAX_WORKER_ID)
    show_error(E_GENERIC, "Unable to get a worker ID.  This should never happen.");
  mgr->workers[id].id = id;
  mgr->workers[id].hits = 0;
  mgr->workers[id].misses = 0;
  mgr->workers[id].rounds = 0;
  mgr->workers[id].updates = 0;
  mgr->workers[id].deletions = 0;
  unsigned int seed = time(NULL) + id;   // Per-worker seed so workers don't walk the same ID sequence.
  uint8_t has_list_pin = 0;

  /* Begin the main loop for grabbing buffers.
   * Note that workers operate in "rounds".  This more closely mimics the behavior of a real application.  Each round the worker
   * will attempt acquire a pin on X number of buffers (controlled by opts values).  It will continue working on each round until
   * all buffers are held and pinned.  Once the round is finished, it will do 1 of 3 things:
   *   1) Simulate read-only: just release the pins.
   *   2) Simulate update   : modify the data in the pages (with random garbage) to update them and invoke the Copy-on-Write logic.
   *                          Obviously this isn't emulating write-through or MVCC, just the usage pattern for the in-memory
   *                          operations a buffer pool would have to do.
   *   3) Simulate delete   : deletes all the references buffers, it's quite rare for delete operations, even in real applications,
   *                          so this should be used sparingly.  Tyche will automatically limit the removed pages to DELETE_RATIO
   *                          or 1 page, whichever is greater.
   *
   * The workers will also follow whatever bias was established via opts.  Each workers will consider the first X% of the total
   * data set to be considered "hot" and will account for Y% of all pages used cumulatively through all the rounds.  This allows
   * the workers to emulate real-world usage patterns such as the Pareto Principle (80/20 Rule).  This defaults to 100/100 which
   * means "no bias" :(  I have yet to meet an application that has no bias in data...
   */
  // Initialize a few values.
  const int BUF_MAX = 10000;  // At 8 bytes per pointer, this uses 80KB on the stack.
  // Guarantee modulo >= 2 so rand_r(...) % modulo is well-defined and non-degenerate.
  const int modulo = (opts.max_pages_retrieved - opts.min_pages_retrieved) > 1 ? (opts.max_pages_retrieved - opts.min_pages_retrieved) : 2;
  Buffer *bufs[BUF_MAX];
  int fetch_this_round = 0;
  int rv = 0;
  int buf_rv = 0;
  bufferid_t id_to_get = 0;
  int delete_ceiling = 0;
  const int hot_floor = 0;
  const int hot_ceiling = opts.page_count * opts.bias_percent;
  const int cold_floor = hot_ceiling;
  const int cold_ceiling = opts.page_count + 1;
  int temp_ceiling = 0;
  int temp_floor = 0;
  uint64_t hot_selections = 0;
  uint64_t cold_selections = 0;
  float my_aggregate = 0.0;
  uint64_t updates = 0;
  float my_update_frequency = 0.0;
  uint64_t deletions = 0;
  float my_delete_frequency = 0.0;
  while(mgr->runnable != 0) {
    /* Callers can provide their own list pins before calling read operations.  Do so here to reduce lock contention. */
    if(has_list_pin == 0) {
      list__update_ref(mgr->list, 1);
      has_list_pin = 1;
    }

    /* Determine how many buffers we're supposed to get, whether they should come from "hot" or not, etc. */
    // NOTE(review): assumes opts.min/max_pages_retrieved keep this <= BUF_MAX;
    // nothing bounds bufs[] here -- confirm opts validation upstream.
    fetch_this_round = (rand_r(&seed) % modulo) + opts.min_pages_retrieved;
    if(fetch_this_round == 0)
      fetch_this_round++;
    for(int i = 0; i<fetch_this_round; i++) {
      // NOTE: on the very first selection this is 0.0/0.0 (NaN); NaN compares
      // false against opts.bias_aggregate below, so the first pick goes "hot".
      my_aggregate = 1.0 * hot_selections / (hot_selections + cold_selections);
      // Find the ID to get.  Make it hot if necessary.
      temp_ceiling = opts.page_count;
      temp_floor = 0;
      // If bias percent is non-zero, gotta find hot/cold explicitly.
      if(opts.bias_percent != 0.0) {
        temp_ceiling = hot_ceiling;
        temp_floor = hot_floor;
        hot_selections++;
        if(my_aggregate > opts.bias_aggregate) {
          temp_ceiling = cold_ceiling;
          temp_floor = cold_floor;
          hot_selections--;
          cold_selections++;
        }
      }
      // Defensive: a tiny page_count with a small bias_percent can round the
      // hot range down to empty, which would make the modulo below divide by 0.
      if(temp_ceiling <= temp_floor)
        temp_ceiling = temp_floor + 1;

      /* Go find our buffer!  If the one we need doesn't exist, get it and add it. */
      // BUG FIX: the old expression ((rand % temp_ceiling) - temp_floor) picked
      // from [-temp_floor, temp_ceiling - temp_floor), so "cold" selections
      // could fall below cold_floor (negative before the bufferid_t
      // conversion).  Select uniformly from [temp_floor, temp_ceiling) instead.
      id_to_get = temp_floor + (rand_r(&seed) % (temp_ceiling - temp_floor));
      rv = list__search(mgr->list, &bufs[i], id_to_get, has_list_pin);

      if(rv == E_OK)
        mgr->workers[id].hits++;
      while(rv == E_BUFFER_NOT_FOUND) {
        mgr->workers[id].misses++;
        buf_rv = buffer__initialize(&bufs[i], id_to_get, 0, NULL, mgr->pages[id_to_get]);
        if (buf_rv != E_OK)
          show_error(buf_rv, "Unable to get a buffer.  RV is %d.", buf_rv);
        bufs[i]->ref_count++;
        rv = list__add(mgr->list, bufs[i], has_list_pin);
        if (rv == E_OK)
          break;
        // If it already exists, destroy our copy and search again.
        if (rv == E_BUFFER_ALREADY_EXISTS)
          buffer__destroy(bufs[i], DESTROY_DATA);
        rv = list__search(mgr->list, &bufs[i], id_to_get, has_list_pin);
      }
    }
    // Hooray, we finished a round!
    mgr->workers[id].rounds++;

    /* We should now have all the buffers we wanted for this round, pinned.  Decide if we should update or delete. */
    if(my_update_frequency < opts.update_frequency) {
      // Try to update the buffers.  The purpose of tyche is to stress test the API, not data randomizing speed.  So we'll cheat by
      // simply copying the same data.
      for(int i=0; i<fetch_this_round; i++) {
        void *new_data = malloc(bufs[i]->data_length);
        if (new_data == NULL)
          show_error(E_GENERIC, "Unable to allocate memory for an update.");
        memcpy(new_data, bufs[i]->data, bufs[i]->data_length);
        rv = list__update(mgr->list, &bufs[i], new_data, bufs[i]->data_length, has_list_pin);
        while(rv == E_BUFFER_IS_DIRTY) {
          // Someone else updated this buffer before us and it's in the slaughter house now.  Find the updated one.
          id_to_get = bufs[i]->id;
          buffer__release_pin(bufs[i]);
          rv = list__search(mgr->list, &bufs[i], id_to_get, has_list_pin);
          while(rv == E_BUFFER_NOT_FOUND) {
            buf_rv = buffer__initialize(&bufs[i], id_to_get, 0, NULL, mgr->pages[id_to_get]);
            // Check the result here too, consistent with the fetch loop above.
            if (buf_rv != E_OK)
              show_error(buf_rv, "Unable to get a buffer.  RV is %d.", buf_rv);
            bufs[i]->ref_count++;
            rv = list__add(mgr->list, bufs[i], has_list_pin);
            if (rv == E_OK)
              break;
            if (rv == E_BUFFER_ALREADY_EXISTS)
              buffer__destroy(bufs[i], DESTROY_DATA);
            rv = list__search(mgr->list, &bufs[i], id_to_get, has_list_pin);
          }
          // Now try the update again.
          rv = list__update(mgr->list, &bufs[i], new_data, bufs[i]->data_length, has_list_pin);
        }
      }
      // We finished this round's update.  Increment it.
      updates++;
      mgr->workers[id].updates += fetch_this_round;
    }
    my_update_frequency = 1.0 * updates / mgr->workers[id].rounds;

    // Now decide if we should delete any buffers.
    delete_ceiling = 0;
    if(my_delete_frequency < opts.delete_frequency) {
      // Try to delete a portion of the buffers.  See DELETE_RATIO.
      delete_ceiling = (fetch_this_round * DELETE_RATIO / 100) + 1;
      for(int i=0; i<delete_ceiling; i++) {
        rv = list__remove(mgr->list, bufs[i]);
      }
      deletions++;
      mgr->workers[id].deletions += delete_ceiling;
    }
    my_delete_frequency = 1.0 * deletions / mgr->workers[id].rounds;

    // Now release any remaining pins we have.  Deleted ones already lost their pin, so we start from delete_ceiling, if applicable.
    for(int i = delete_ceiling; i<fetch_this_round; i++)
      buffer__release_pin(bufs[i]);

    /* Release the list pin if there are pending writers.  This is a dirty read/race but that's ok for an extra loop */
    if(mgr->list->pending_writers != 0) {
      list__update_ref(mgr->list, -1);
      has_list_pin = 0;
    }
  }

  // We ran out of time.  Let's update the manager with our statistics before we quit.  Then release our pin.
  pthread_mutex_lock(&mgr->lock);
  mgr->hits += mgr->workers[id].hits;
  mgr->misses += mgr->workers[id].misses;
  mgr->rounds += mgr->workers[id].rounds;
  mgr->deletions += mgr->workers[id].deletions;
  mgr->updates += mgr->workers[id].updates;
  pthread_mutex_unlock(&mgr->lock);
  if(has_list_pin != 0) {
    list__update_ref(mgr->list, -1);
    has_list_pin = 0;
  }

  // All done.
  pthread_exit(0);
}