Example #1
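  // Finalizer thread body: run queued finalizers until exit_ is set,
  // blocking in worker_wait() whenever the work list is empty.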
  void FinalizerHandler::perform(STATE) {
    GCTokenImpl gct;
    utilities::thread::Thread::set_os_name("rbx.finalizer");

    state->vm()->thread->hard_unlock(state, gct);

    while(!exit_) {
      if(!process_list_) first_process_item();

      if(!process_list_) {
        utilities::thread::Mutex::LockGuard lg(worker_lock_);

        if(finishing_) supervisor_signal();

        // exit_ might have been set in the meantime, after
        // we grabbed the worker_lock
        if(!exit_) {
          GCIndependent indy(state);
          worker_wait();
        }

        continue;
      }

      finalize(state);
      next_process_item();
    }
  }
Example #2
/*===========================================================================*
 *				fs_sendrec				     *
 *===========================================================================*/
int fs_sendrec(endpoint_t fs_e, message *reqmp)
{
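/* Send a request to file server fs_e and yield this worker thread until the
 * reply has arrived; the reply overwrites *reqmp. */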
  struct vmnt *vmp;
  int r;

  if ((vmp = find_vmnt(fs_e)) == NULL) {
	printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
	return(EIO);
  }
  if (fs_e == fp->fp_endpoint) return(EDEADLK);

  self->w_sendrec = reqmp;	/* Where to store request and reply */

  /* Find out whether we can send right away or have to enqueue */
  if (	!(vmp->m_flags & VMNT_CALLBACK) &&
	vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) {
	/* There's still room to send more and no proc is queued */
	r = sendmsg(vmp, vmp->m_fs_e, self);
  } else {
	r = queuemsg(vmp);
  }
  self->w_next = NULL;	/* End of list */

  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  return(reqmp->m_type);
}
Example #3
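  // Variant of the finalizer loop above, extended with pause/resume
  // handshaking (paused_, pause_cond_) and explicit GC dependent/independent
  // transitions around the blocking worker_wait() call.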
  void FinalizerHandler::perform(STATE) {
    GCTokenImpl gct;
    const char* thread_name = "rbx.finalizer";
    self_->set_name(thread_name);

    RUBINIUS_THREAD_START(thread_name, state->vm()->thread_id(), 1);

    state->vm()->thread->hard_unlock(state, gct, 0);

    while(!exit_) {
      state->vm()->set_call_frame(0);

      if(!process_list_) first_process_item();

      if(!process_list_) {
        {
          utilities::thread::Mutex::LockGuard lg(worker_lock_);

          if(finishing_) supervisor_signal();

          // exit_ might have been set in the meantime, after
          // we grabbed the worker_lock
          if(exit_) break;
          state->gc_independent(gct, 0);
          paused_ = true;
          pause_cond_.signal();
          worker_wait();
          if(exit_) break;
        }
        state->gc_dependent();
        {
          utilities::thread::Mutex::LockGuard lg(worker_lock_);
          paused_ = false;
          if(exit_) break;
        }

        continue;
      }

      finalize(state);
      next_process_item();
    }
    RUBINIUS_THREAD_STOP(thread_name, state->vm()->thread_id(), 1);
  }
Example #4
void tll_upgrade(tll_t *tllp)
{
/* Upgrade three-level-lock tll from read-serialized to write-only */

  assert(self != NULL);
  assert(tllp != NULL);
  assert(tllp->t_owner == self);
  assert(tllp->t_current != TLL_READ); /* i.e., read-serialized or write-only */
  if (tllp->t_current == TLL_WRITE) return;	/* Nothing to do */
  if (tllp->t_readonly != 0) {		/* Wait for readers to leave */
	assert(!(tllp->t_status & TLL_UPGR));
	tllp->t_status |= TLL_UPGR;
	worker_wait();
	tllp->t_status &= ~TLL_UPGR;
	tllp->t_status &= ~TLL_PEND;
	assert(tllp->t_readonly == 0);
  }
  tllp->t_current = TLL_WRITE;
}
Example #5
/*===========================================================================*
 *				vm_sendrec				     *
 *===========================================================================*/
int vm_sendrec(message *reqmp)
{
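/* Send a request to VM and yield this worker thread until the reply has
 * arrived; the reply overwrites *reqmp. */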
  int r;

  assert(self);
  assert(reqmp);

  self->w_sendrec = reqmp;	/* Where to store request and reply */

  r = sendmsg(NULL, VM_PROC_NR, self);

  self->w_next = NULL;	/* End of list */

  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  return(reqmp->m_type);
}
Example #6
/*===========================================================================*
 *				drv_sendrec				     *
 *===========================================================================*/
int drv_sendrec(endpoint_t drv_e, message *reqmp)
{
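/* Send a request to block driver drv_e asynchronously and yield this worker
 * thread until the driver's reply has been delivered. */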
	int r;
	struct dmap *dp;

	/* For the CTTY_MAJOR case, we would actually have to lock the device
	 * entry being redirected to.  However, the CTTY major only hosts a
	 * character device while this function is used only for block devices.
	 * Thus, we can simply deny the request immediately.
	 */
	if (drv_e == CTTY_ENDPT) {
		printf("VFS: /dev/tty is not a block device!\n");
		return EIO;
	}

	if ((dp = get_dmap(drv_e)) == NULL)
		panic("driver endpoint %d invalid", drv_e);

	lock_dmap(dp);
	if (dp->dmap_servicing != INVALID_THREAD)
		panic("driver locking inconsistency");
	dp->dmap_servicing = self->w_tid;
	self->w_task = drv_e;
	self->w_drv_sendrec = reqmp;

	if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
		/* Yield execution until we've received the reply */
		worker_wait();
	} else {
		printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
			drv_e, r);
		util_stacktrace();
	}

	dp->dmap_servicing = INVALID_THREAD;
	self->w_task = NONE;
	self->w_drv_sendrec = NULL;
	unlock_dmap(dp);
	return(OK);
}
Example #7
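/* move the clients of a terminating worker onto the primary worker (the head
 * of the workers list), waking it so it picks them up. */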
static void worker_relocate_clients (worker_t *worker)
{
    if (workers == NULL)
        return;
    while (worker->count || worker->pending_count)
    {
        client_t *client = worker->clients, **prevp = &worker->clients;

        worker->wakeup_ms = worker->time_ms + 150;
        worker->current_time.tv_sec = (time_t)(worker->time_ms/1000);
        while (client)
        {
            if (client->flags & CLIENT_ACTIVE)
            {
                client->worker = workers;
                prevp = &client->next_on_worker;
            }
            else
            {
                *prevp = client->next_on_worker;
                worker_add_client (worker, client);
                worker->count--;
            }
            client = *prevp;
        }
        if (worker->clients)
        {
            thread_spin_lock (&workers->lock);
            *workers->pending_clients_tail = worker->clients;
            workers->pending_clients_tail = prevp;
            workers->pending_count += worker->count;
            thread_spin_unlock (&workers->lock);
            worker_wakeup (workers);
            worker->clients = NULL;
            worker->last_p = &worker->clients;
            worker->count = 0;
        }
        worker_wait (worker);
    }
}
Example #8
static int tll_append(tll_t *tllp, tll_access_t locktype)
{
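/* Append the calling worker thread to the tll's write or serial queue and
 * yield until it is granted a lock of the given type. */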
  struct worker_thread *queue;

  assert(self != NULL);
  assert(tllp != NULL);
  assert(locktype != TLL_NONE);

  /* Read-only and write-only requests go to the write queue. Read-serialized
   * requests go to the serial queue. Then we wait for an event to signal it's
   * our turn to go. */
  queue = NULL;
  if (locktype == TLL_READ || locktype == TLL_WRITE) {
	if (tllp->t_write == NULL)
		tllp->t_write = self;
	else
		queue = tllp->t_write;
  } else {
	if (tllp->t_serial == NULL)
		tllp->t_serial = self;
	else
		queue = tllp->t_serial;
  }

  if (queue != NULL) {	/* Traverse to end of queue */
	while (queue->w_next != NULL) queue = queue->w_next;
	queue->w_next = self;
  }
  self->w_next = NULL; /* End of queue */

  /* Now wait for the event that signals it's our turn */
  worker_wait();

  tllp->t_current = locktype;
  tllp->t_status &= ~TLL_PEND;
  tllp->t_owner = self;

  if (tllp->t_current == TLL_READ) {
	tllp->t_readonly++;
	tllp->t_owner = NULL;
  } else if (tllp->t_current == TLL_WRITE)
	assert(tllp->t_readonly == 0);

  /* Due to the way upgrading and downgrading works, read-only requests are
   * scheduled to run after a downgraded lock is released (because they are
   * queued on the write-only queue which has priority). This results from the
   * fact that the downgrade operation cannot know whether the next locktype on
   * the write-only queue is really write-only or actually read-only. However,
   * that means that read-serialized requests stay queued, while they could run
   * simultaneously with read-only requests. See if there are any and grant
   * the head request access */
  if (tllp->t_current == TLL_READ && tllp->t_serial != NULL) {
	tllp->t_owner = tllp->t_serial;
	tllp->t_serial = tllp->t_serial->w_next;
	tllp->t_owner->w_next = NULL;
	assert(!(tllp->t_status & TLL_PEND));
	tllp->t_status |= TLL_PEND;
	worker_signal(tllp->t_owner);
  }

  return(OK);
}
Example #9
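/* worker thread main loop: sweep the client list, run each client's
 * process() callback when it is due, and block in worker_wait() between
 * sweeps until the worker is told to shut down. */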
void *worker (void *arg)
{
    worker_t *worker = arg;
    long prev_count = -1;
    client_t **prevp = &worker->clients;

    worker->running = 1;
    worker->wakeup_ms = (int64_t)0;
    worker->time_ms = timing_get_time();

    while (1)
    {
        client_t *client = *prevp;
        uint64_t sched_ms = worker->time_ms + 2;

        while (client)
        {
            if (client->worker != worker) abort();
            /* process client details but skip those that are not ready yet */
            if (client->flags & CLIENT_ACTIVE)
            {
                int ret = 0;
                client_t *nx = client->next_on_worker;

                if (worker->running == 0 || client->schedule_ms <= sched_ms)
                {
                    ret = client->ops->process (client);
                    if (ret < 0)
                    {
                        client->worker = NULL;
                        if (client->ops->release)
                            client->ops->release (client);
                    }
                    if (ret)
                    {
                        worker->count--;
                        if (nx == NULL) /* is this the last client */
                            worker->last_p = prevp;
                        client = *prevp = nx;
                        continue;
                    }
                }
                if ((client->flags & CLIENT_ACTIVE) && client->schedule_ms < worker->wakeup_ms)
                    worker->wakeup_ms = client->schedule_ms;
            }
            prevp = &client->next_on_worker;
            client = *prevp;
        }
        if (prev_count != worker->count)
        {
            DEBUG2 ("%p now has %d clients", worker, worker->count);
            prev_count = worker->count;
        }
        if (worker->running == 0)
        {
            if (global.running == ICE_RUNNING)
                break;
            if (worker->count == 0 && worker->pending_count == 0)
                break;
        }
        prevp = worker_wait (worker);
    }
    worker_relocate_clients (worker);
    INFO0 ("shutting down");
    return NULL;
}
Example #10
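/* variant of the worker loop above that throttles long sweeps: after ~300
 * clients in one pass only alternate due clients run, while a client whose
 * wakeup flag is raised may still be let through. */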
void *worker (void *arg)
{
    worker_t *worker = arg;
    long prev_count = -1;
    client_t **prevp = &worker->clients;
    uint64_t c = 0;

    worker->running = 1;
    worker->wakeup_ms = (int64_t)0;
    worker->time_ms = timing_get_time();

    while (1)
    {
        client_t *client = *prevp;
        uint64_t sched_ms = worker->time_ms + 12;

        c = 0;
        while (client)
        {
            if (client->worker != worker) abort();
            /* process client details but skip those that are not ready yet */
            if (client->flags & CLIENT_ACTIVE)
            {
                int ret = 0;
                client_t *nx = client->next_on_worker;

                int process = (worker->running == 0 || client->schedule_ms <= sched_ms) ? 1 : 0;
                if (process)
                {
                    if (c > 300 && (c & 1))  // after ~300 in one sweep, only process alternate clients
                       process = 0;
                }
                else if (client->wakeup && *client->wakeup)
                {
                    if (c & 1)
                        process = 1; // enable this one to pass through
                    else
                        client->schedule_ms = worker->time_ms;
                }

                if (process)
                {
                    c++;
                    if ((c & 31) == 0)
                    {
                        // refresh the cached time every 32 processed clients to keep it in sync
                        worker->time_ms = timing_get_time();
                        worker->current_time.tv_sec = (time_t)(worker->time_ms/1000);
                    }
                    ret = client->ops->process (client);
                    if (ret < 0)
                    {
                        client->worker = NULL;
                        if (client->ops->release)
                            client->ops->release (client);
                    }
                    if (ret)
                    {
                        worker->count--;
                        if (nx == NULL) /* is this the last client */
                            worker->last_p = prevp;
                        client = *prevp = nx;
                        continue;
                    }
                }
                if ((client->flags & CLIENT_ACTIVE) && client->schedule_ms < worker->wakeup_ms)
                    worker->wakeup_ms = client->schedule_ms;
            }
            prevp = &client->next_on_worker;
            client = *prevp;
        }
        if (prev_count != worker->count)
        {
            DEBUG2 ("%p now has %d clients", worker, worker->count);
            prev_count = worker->count;
        }
        if (worker->running == 0)
        {
            if (global.running == ICE_RUNNING)
                break;
            if (worker->count == 0 && worker->pending_count == 0)
                break;
        }
        prevp = worker_wait (worker);
    }
    worker_relocate_clients (worker);
    INFO0 ("shutting down");
    return NULL;
}
Example #11
int
main(int argc, char *argv[])
{
    char *unixctl_path = NULL;
    struct unixctl_server *unixctl;
    struct signal *sighup;
    char *remote;
    bool exiting;
    int retval;

    proctitle_init(argc, argv);
    set_program_name(argv[0]);
    stress_init_command();
    remote = parse_options(argc, argv, &unixctl_path);
    signal(SIGPIPE, SIG_IGN);
    sighup = signal_register(SIGHUP);
    process_init();
    ovsrec_init();

    daemonize_start();

    if (want_mlockall) {
#ifdef HAVE_MLOCKALL
        if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
            VLOG_ERR("mlockall failed: %s", ovs_strerror(errno));
        }
#else
        VLOG_ERR("mlockall not supported on this system");
#endif
    }

    worker_start();

    retval = unixctl_server_create(unixctl_path, &unixctl);
    if (retval) {
        exit(EXIT_FAILURE);
    }
    unixctl_command_register("exit", "", 0, 0, ovs_vswitchd_exit, &exiting);

    bridge_init(remote);
    free(remote);

    exiting = false;
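    /* Main loop: give each subsystem a chance to run, then let each register
     * the events it waits on and block in poll_block() until one fires. */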
    while (!exiting) {
        worker_run();
        if (signal_poll(sighup)) {
            vlog_reopen_log_file();
        }
        memory_run();
        if (memory_should_report()) {
            struct simap usage;

            simap_init(&usage);
            bridge_get_memory_usage(&usage);
            memory_report(&usage);
            simap_destroy(&usage);
        }
        bridge_run_fast();
        bridge_run();
        bridge_run_fast();
        unixctl_server_run(unixctl);
        netdev_run();

        worker_wait();
        signal_wait(sighup);
        memory_wait();
        bridge_wait();
        unixctl_server_wait(unixctl);
        netdev_wait();
        if (exiting) {
            poll_immediate_wake();
        }
        poll_block();
    }
    bridge_exit();
    unixctl_server_destroy(unixctl);

    return 0;
}
Example #12
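/* variant of the worker loop where a due client normally runs unless the
 * sweep has already processed ~9000 unflagged clients; clients with a raised
 * wakeup flag run regardless, and on shutdown every active client is forced
 * through. */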
void *worker (void *arg)
{
    worker_t *worker = arg;
    long prev_count = -1;
    client_t **prevp = &worker->clients;
    uint64_t c = 0;

    worker->running = 1;
    worker->wakeup_ms = (int64_t)0;
    worker->time_ms = timing_get_time();

    while (1)
    {
        client_t *client = *prevp;
        uint64_t sched_ms = worker->time_ms + 12;

        c = 0;
        while (client)
        {
            if (client->worker != worker) abort();
            /* process client details but skip those that are not ready yet */
            if (client->flags & CLIENT_ACTIVE)
            {
                int ret = 0;
                client_t *nx = client->next_on_worker;

                int process = 1;
                if (worker->running)  // force all active clients to run on worker shutdown
                {
                    if (client->schedule_ms <= sched_ms)
                    {
                        if (c > 9000 && client->wakeup == NULL)
                            process = 0;
                    }
                    else if (client->wakeup == NULL || *client->wakeup == 0)
                    {
                        process = 0;
                    }
                }

                if (process)
                {
                    if ((c & 511) == 0)
                    {
                        // refresh the cached time every 512 processed clients to keep it in sync
                        worker->time_ms = worker_check_time_ms (worker);
                        worker->current_time.tv_sec = (time_t)(worker->time_ms/1000);
                    }
                    c++;
                    ret = client->ops->process (client);
                    if (ret < 0)
                    {
                        client->worker = NULL;
                        if (client->ops->release)
                            client->ops->release (client);
                    }
                    if (ret)
                    {
                        worker->count--;
                        if (nx == NULL) /* is this the last client */
                            worker->last_p = prevp;
                        client = *prevp = nx;
                        continue;
                    }
                }
                if ((client->flags & CLIENT_ACTIVE) && client->schedule_ms < worker->wakeup_ms)
                    worker->wakeup_ms = client->schedule_ms;
            }
            prevp = &client->next_on_worker;
            client = *prevp;
        }
        if (prev_count != worker->count)
        {
            DEBUG2 ("%p now has %d clients", worker, worker->count);
            prev_count = worker->count;
        }
        if (worker->running == 0)
        {
            if (global.running == ICE_RUNNING)
                break;
            if (worker->count == 0 && worker->pending_count == 0)
                break;
        }
        prevp = worker_wait (worker);
    }
    worker_relocate_clients (worker);
    INFO0 ("shutting down");
    return NULL;
}