Example no. 1
0
void fiber_control::yield() {
  // Cooperatively yields the CPU: pick the next runnable fiber and context
  // switch to it via yield_to(). Selection order:
  //   1. the calling worker's own active queue,
  //   2. failing that, sweep the other workers and try to steal work.
  // Safe to call from a non-fiber thread (returns immediately).
  // the core scheduling logic
  tls* t = get_tls_ptr();
  if (t == NULL) return;
  // remove some other work to do.
  fiber_control* parentgroup = t->parent;
  size_t workerid = t->workerid;
  fiber* next_fib = NULL;
  // Fast path: nactive is read without the lock as a cheap hint; the
  // authoritative dequeue happens under active_lock.
  if (parentgroup->schedule[workerid].nactive > 0) {
    parentgroup->schedule[workerid].active_lock.lock();
    next_fib = parentgroup->active_queue_remove(workerid);
    parentgroup->schedule[workerid].active_lock.unlock();
  }
  // no work on my queue!
  if (next_fib == NULL) {
    // ok. do a full sweep. Try to steal some work
    // Probe victims starting just after our own id so steal pressure is
    // spread across workers instead of everyone hammering worker 0.
    for (size_t i = 1;i < parentgroup->nworkers; ++i) {
      size_t probe = (i + workerid) % parentgroup->nworkers;
      if (parentgroup->schedule[probe].nactive > 0) {
        parentgroup->schedule[probe].active_lock.lock();
        // Peek at the victim's head fiber; only steal it if its affinity
        // mask allows it to run on this worker (presumably
        // active_queue_remove(probe) removes that same head -- verify).
        fiber_control::fiber* ret = parentgroup->schedule[probe].active_head.next;
        if (ret != NULL && ret->affinity.get(workerid)) {
          next_fib = parentgroup->active_queue_remove(probe);
        }
        parentgroup->schedule[probe].active_lock.unlock();
        if (next_fib) {
          break;
        }
      }
    }
  }
  // yield_to(NULL) handles the no-work case (terminate/deschedule or no-op).
  t->parent->yield_to(next_fib);
}
Example no. 2
0
bool fiber_control::worker_has_fibers_on_queue() {
  tls* t = get_tls_ptr();
  if (t == NULL) return false;
  fiber_control* parentgroup = t->parent;
  size_t workerid = t->workerid;
  return (parentgroup->schedule[workerid].nactive > 0);
}
Example no. 3
0
void fiber_control::set_tls(void* tls) {
  // Stores a fiber-local-storage pointer on the currently running fiber.
  // Must be called from within a fiber; asserts otherwise.
  fiber_control::tls* f = get_tls_ptr();
  if (f != NULL) {
    f->cur_fiber->fls = tls;
  } else {
    // cannot set TLS of a non-fiber
    // Bug fix: the assertion message previously said "get" (copy-pasted
    // from get_tls()); this is the setter.
    ASSERT_MSG(false, "Trying to set a fiber TLS from a non-fiber");
  }
}
Example no. 4
0
void* fiber_control::get_tls() {
  fiber_control::tls* f = get_tls_ptr();
  if (f != NULL) {
    return f->cur_fiber->fls;
  } else {
    // cannot get TLS of a non-fiber
    ASSERT_MSG(false, "Trying to get a fiber TLS from a non-fiber");
    return NULL;
  }
}
Example no. 5
0
void fiber_control::deschedule_self(pthread_mutex_t* lock) {
  fiber* fib = get_tls_ptr()->cur_fiber;
  fib->lock.lock();
  assert(fib->descheduled == false);
  assert(fib->scheduleable == true);
  fib->deschedule_lock = lock;
  fib->descheduled = true;
  //printf("Descheduling requested %ld\n", fib->id);
  fib->lock.unlock();
  yield();
}
Example no. 6
0
void fiber_control::fast_yield() {
  tls* t = get_tls_ptr();
  if (t == NULL) return;
  // remove some other work to do.
  fiber_control* parentgroup = t->parent;
  size_t workerid = t->workerid;
  fiber* next_fib = NULL;
  if (parentgroup->schedule[workerid].nactive > 0) {
    parentgroup->schedule[workerid].active_lock.lock();
    next_fib = parentgroup->active_queue_remove(workerid);
    parentgroup->schedule[workerid].active_lock.unlock();
  }
  if (next_fib != NULL) t->parent->yield_to(next_fib);
}
Example no. 7
0
// the trampoline to call the user function. This function never returns
void fiber_control::trampoline(intptr_t _args) {
  // Entry point of every fiber: finishes the context-switch bookkeeping,
  // runs the user-supplied function, then terminates the fiber.
  // we may have launched to here by switching in from another fiber.
  // we will need to clean up the previous fiber
  tls* t = get_tls_ptr();
  if (t->prev_fiber) t->parent->reschedule_fiber(t->workerid, t->prev_fiber);
  t->prev_fiber = NULL;

  // args were heap-allocated by the fiber's creator; we own them now.
  trampoline_args* args = reinterpret_cast<trampoline_args*>(_args);
  try {
    args->fn();
  } catch (...) {
    // NOTE(review): exceptions from the user function are deliberately
    // swallowed so they cannot unwind past the fiber boundary; consider
    // logging here if silent failures become a problem.
  }
  delete args;
  fiber_control::exit();
}
Example no. 8
0
void fiber_control::worker_init(size_t workerid) {
  /*
   * This is the "root" stack for each worker.
   * When there are active user threads associated with this worker, 
   * it will switch directly between the fibers.
   * But, when the worker has no other fiber to run, it will return to this
   * stack and and wait in a condition variable
   */
  // create a root context
  create_tls_ptr();
  // set up the tls structure
  tls* t = get_tls_ptr();
  t->prev_fiber = NULL;
  t->cur_fiber = NULL;
  t->garbage = NULL;
  t->workerid = workerid;
  t->parent = this;

  // Main worker loop. Invariant: active_lock is held at the top of every
  // iteration (taken here, re-taken after each yield_to, and held across
  // the condition-variable wait).
  schedule[workerid].active_lock.lock();
  while(!stop_workers) {
    // get a fiber to run
    fiber* next_fib = t->parent->active_queue_remove(workerid);
    if (next_fib != NULL) {
      // if there is a fiber. yield to it
      // Drop the lock before the context switch; the fiber runs until it
      // yields back to this base context.
      schedule[workerid].active_lock.unlock();
      yield_to(next_fib);
      // Give the RPC layer a chance to flush pending messages after the
      // fiber returns control (only if a distributed_control is alive).
      distributed_control* dc = distributed_control::get_instance();
      if (dc) dc->flush_soon();
      schedule[workerid].active_lock.lock();
    } else {
      // if there is no fiber. wait.
      // `waiting` lets producers know this worker must be signalled;
      // the wait atomically releases and re-acquires active_lock.
      schedule[workerid].waiting = true;
      schedule[workerid].active_cond.wait(schedule[workerid].active_lock);
      schedule[workerid].waiting = false;
    }
  }
  schedule[workerid].active_lock.unlock();
}
fiber_control::fiber* fiber_control::get_active_fiber() {
  // Returns the fiber currently running on the calling thread, or NULL
  // when the caller is not a fiber worker (no fiber TLS).
  tls* state = get_tls_ptr();
  return (state == NULL) ? NULL : state->cur_fiber;
}
Example no. 10
0
size_t fiber_control::get_worker_id() {
  fiber_control::tls* tls = get_tls_ptr();
  if (tls != NULL) return tls->workerid;
  else return (size_t)(-1);
}
Example no. 11
0
bool fiber_control::in_fiber() {
  // True when the calling thread carries fiber TLS, i.e. it is executing
  // under the control of a fiber_control worker.
  return NULL != get_tls_ptr();
}
Example no. 12
0
size_t fiber_control::get_tid() {
  fiber_control::tls* tls = get_tls_ptr();
  if (tls != NULL) return reinterpret_cast<size_t>(tls->cur_fiber);
  else return (size_t)(0);
}
Example no. 13
0
void fiber_control::yield_to(fiber* next_fib) {
  // Context-switches from the current fiber (or the worker's base context)
  // to next_fib. With next_fib == NULL: if the current fiber is terminating
  // or descheduled, switches back to the worker's base context for cleanup;
  // otherwise this is a no-op. After resuming, reschedules the fiber we
  // switched away from and polls the RPC layer.
  // the core scheduling logic
  tls* t = get_tls_ptr();
  /*
  if (next_fib) {
    printf("yield to: %ld\n", next_fib->id);
    if (t->cur_fiber) {
      printf("from: %ld\n", t->cur_fiber->id);
    }
  } */
  if (next_fib != NULL) {
    // reset the priority flag
    next_fib->priority = false;
    // current fiber moves to previous
    // next fiber move to current
    t->prev_fiber = t->cur_fiber;
    t->cur_fiber = next_fib;
    if (t->prev_fiber != NULL) {
      // context switch to fib outside the lock
      boost::context::jump_fcontext(t->prev_fiber->context,
                                    t->cur_fiber->context,
                                    t->cur_fiber->initial_trampoline_args);
    } else {
      // We are on the worker's base context (no current fiber); jump from
      // base_context into the target fiber instead.
      boost::context::jump_fcontext(&t->base_context,
                                    t->cur_fiber->context,
                                    t->cur_fiber->initial_trampoline_args);
    }
  } else {
    // ok. there isn't anything to schedule to
    // am I meant to be terminated? or descheduled?
    if (t->cur_fiber &&
        (t->cur_fiber->terminate || t->cur_fiber->descheduled) ) {
      // yup. killing current fiber
      // context switch back to basecontext which will
      // do the cleanup
      //
      // current fiber moves to previous
      // next fiber (base context) move to current
      // (as identified by cur_fiber = NULL)
      t->prev_fiber = t->cur_fiber;
      t->cur_fiber = NULL;
      boost::context::jump_fcontext(t->prev_fiber->context,
                                    &t->base_context,
                                    0);
    } else {
      // nothing to do, and not terminating...
      // then don't yield!
      return;
    }
  }
  // Execution resumes here only after some other context switches back to
  // this fiber.
  // reread the tls pointer because we may have woken up in a different thread
  t = get_tls_ptr();
  // reschedule the previous fiber
  if (t->prev_fiber) reschedule_fiber(t->workerid, t->prev_fiber);
  t->prev_fiber = NULL;

  // if distributed_controller alive
  // Handler-thread workers also drain incoming RPC calls on each switch.
  distributed_control* dc = distributed_control::get_instance();
  if (dc && t->workerid < dc->num_handler_threads()) {
    dc->handle_incoming_calls(t->workerid, dc->num_handler_threads());
  }
}