Example 1
void
scheduler::remote_ready2ready_() noexcept {
    // protect against concurrent access
    std::unique_lock< std::mutex > lk( remote_ready_mtx_);
    // get context from remote ready-queue
    for ( context * ctx : remote_ready_queue_) {
        // store context in local queues
        set_ready( ctx);
    }
    remote_ready_queue_.clear();
}
Example 2
	GLvoid CDKRoomInfoTexture::set_selected(char *class_name, char leading)
	{
		// work on a private copy so the caller's string is not modified
		char *info_texture_class_name=strdup(class_name);
		info_texture_class_name[0]=leading;
		info_texture_class_name[1]='E';
		textures[0]=texture_list->get_texture_by_name(info_texture_class_name);
		info_texture_class_name[1]='D';
		textures[1]=texture_list->get_texture_by_name(info_texture_class_name);
		// strdup() allocates with malloc(), so release with free(), not delete
		free(info_texture_class_name);
		set_ready(true);

		blink=true;
		set_texture(textures[0]);
	}
Example 3
/*
    When any device pulls down READY, READY goes down.
*/
void peribox_device::ready_join(int slot, int state)
{
	LOGMASKED(LOG_READY, "Incoming READY=%d from slot %d\n", state, slot);
	// We store the inverse state
	if (state==CLEAR_LINE)
		m_ready_flag |= (1 << slot);
	else
		m_ready_flag &= ~(1 << slot);

	if (m_ioport_connected)
		set_ready((m_ready_flag != 0)? CLEAR_LINE : ASSERT_LINE);
	else
		m_slot1_ready((m_ready_flag != 0)? CLEAR_LINE : ASSERT_LINE);
}
Example 4
/*
 * start as many threads as might be needed for the current queue
 */
int task_start (TASKQ *q)
{
  int num_tasks = 0;
  TASK *t;

  wait_mutex (q);
  q->stop = 0;
  for (t = q->queue; t != NULL; t = t->next)
    num_tasks++;
  num_tasks -= q->waiting;
  set_ready (q);
  while ((num_tasks-- > 0) && (q->running < q->maxthreads))
  {
    debug ("starting new task\n");
    q->running++;
    t_start (task_run, q);
  }
  end_mutex (q);
  return (0);
}
Example 5
        T fetch_and_add( U inc ) {

          block_until_ready();

          // the fetch-and-add unit is now aggregating, so add my increment

          participant_count++;
          committed--;
          increment += inc;
        
          // if I'm the last client to enter and either the flush threshold is
          // reached or there are no more committed participants, start the flush
          if ( ready_waiters == 0 && (participant_count >= flush_threshold || committed == 0 )) {
            set_not_ready();
            uint64_t increment_total = increment;
            flat_combiner_fetch_and_add_amount += increment_total;
            auto t = target;
            result = call(target.core(), [t, increment_total]() -> U {
              T * p = t.pointer();
              uint64_t r = *p;
              *p += increment_total;
              return r;
            });
            // tell the others that the result has arrived
            Grappa::broadcast(&untilReceived);
          } else {
            // someone else will start the flush
            Grappa::wait(&untilReceived);
          }

          uint64_t my_start = result;
          result += inc;
          participant_count--;
          increment -= inc;   // for validation purposes (could just set to 0)
          if ( participant_count == 0 ) {
            CHECK( increment == 0 ) << "increment = " << increment << " even though all participants are done";
            set_ready();
          }

          return my_start;
        }
Example 6
/*
 * Stop and wait for all threads to exit (assume we are one of them).
 * If stop is already set, return non-zero.
 */
int task_stop (TASKQ *q)
{
  wait_mutex (q);
  if (q->stop)
  {
    end_mutex (q);
    return (-1);
  }
  q->stop = 1;
  debug ("waiting on tasks to exit...\n");
  while (q->waiting || q->running)
  {
    set_ready (q);
    end_mutex (q);
    sleep (100);
    wait_mutex (q);
  }
  end_mutex (q);
  debug ("stop completed\n");
  return (0);
}
Example 7
void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}
Example 8
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  set_ready(exec_ctx, fd, &fd->write_closure);
}
Example 9
void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  set_ready(exec_ctx, fd, &fd->read_closure);
}