// Construct the task manager and start its worker pool.
// concurrency: desired number of worker threads; defaults to the hardware
//              thread count reported by the runtime (which may be 0 when
//              the platform cannot determine it).
// NOTE(review): concurrency_ is deliberately initialized to 0 here; the
// requested value is applied by set_concurrency(), which presumably spawns
// the workers -- confirm against its definition (not visible in this chunk).
dfs_task_manager(std::size_t concurrency = std::thread::hardware_concurrency())
     : spawned_threads_{0}
     , concurrency_{0}
     , idle_threads_{0}
 {
     set_concurrency(concurrency);
 }
Example #2
0
/*
 * Queue one job for execution by the worker pool.
 * f    - callback; workers invoke it as f(thread_id, data)
 *        (see do_single_job).
 * data - opaque pointer handed back to the callback; caller keeps ownership.
 *
 * Thread-safe: all queue state is modified under jobs_mutex.
 */
void submit_job(JOB_CRUNCHER f, void *data)
{
/* Ensure the pool is running at full size before queuing work. */
set_concurrency(get_max_threads());

thread_mutex_lock(&jobs_mutex);
/* Grow the job array when the next free slot would be past capacity. */
if(jobs_free>=jobs_size)expand_jobs();
jobs_submitted++;
job[jobs_free].data=data;
job[jobs_free].job=f;
jobs_free++;
/* Wake all waiting workers; they re-check the queue under the mutex. */
thread_cond_broadcast(&wait_for_more_jobs_condition);
thread_mutex_unlock(&jobs_mutex);
}
Example #3
0
/*
 * Initialize thread-pool bookkeeping and start with a single thread.
 * mt - total thread count requested (including the calling thread);
 *      a value <= 0 means "auto-detect": use one thread per CPU.
 * Note: max_threads counts only the extra worker threads, hence the -1
 * everywhere and the +1 in the reports below.
 */
void init_threads(int mt)
{
int workers;

/* Derive the worker count; mt<=0 falls back to cpu_count(). */
workers = (mt > 0) ? mt - 1 : cpu_count() - 1;
if(workers < 0)
	workers = 0;
max_threads = workers;

thread_id=do_alloc(max_threads, sizeof(*thread_id));
num_threads=0;
threads_started=0;
thread_cond_init(&thread_not_needed);
thread_mutex_init(&thread_num_mutex);
set_concurrency(1);
fprintf(stderr, "maximum threads: %d\n", max_threads+1);
fprintf(LOG, "maximum threads: %d\n", max_threads+1);
}
Example #4
0
/*
 * Claim the next pending job from the queue and run it.
 * thread_id - identifier of the calling worker, forwarded to the callback.
 * Returns 1 while more work may remain, 0 when the queue is drained or this
 * call just completed the final outstanding job.
 *
 * Queue counters (next_job_to_do, jobs_free, jobs_done, jobs_submitted) are
 * only touched under jobs_mutex; the callback itself runs with the mutex
 * released so other workers can claim jobs concurrently.
 */
int do_single_job(int thread_id)
{
long i;
thread_mutex_lock(&jobs_mutex);
i=next_job_to_do;
if(i>=jobs_free){
	/* Every queued job is already claimed by some worker. */
	thread_mutex_unlock(&jobs_mutex);
	return 0;
	}
next_job_to_do++;
/* Drop the lock while the (potentially long-running) job executes. */
thread_mutex_unlock(&jobs_mutex);
job[i].job(thread_id, job[i].data);
thread_mutex_lock(&jobs_mutex);
jobs_done++;
if(jobs_done==jobs_submitted) {
	/* This was the last outstanding job: wake anyone waiting for
	   completion and shrink the pool back to a single thread. */
	thread_cond_broadcast(&all_done_condition);
	set_concurrency(1);
	thread_mutex_unlock(&jobs_mutex);
	return 0;
	}
thread_mutex_unlock(&jobs_mutex);
return 1;
}
Example #5
0
/*
 * This program acts as a generic gateway. It listens for connections
 * to a local address ('-l' option). Upon accepting a client connection,
 * it connects to the specified remote address ('-r' option) and then
 * just pumps the data through without any modification.
 */
/*
 * Proxy daemon entry point: parse options, daemonize, bind the listening
 * socket, fan out to num_procs processes via set_concurrency(), then accept
 * clients forever, spawning one ST thread per connection.
 * Exits with status 1 on any setup failure; the accept loop never returns.
 */
int main(int argc, char *argv[])
{
  extern char *optarg;
  int opt, sock, n;
  int laddr, raddr, num_procs;
  int serialize_accept = 0;
  struct sockaddr_in lcl_addr, cli_addr;
  st_netfd_t cli_nfd, srv_nfd;

  prog = argv[0];
  num_procs = laddr = raddr = 0;

  /* Parse arguments.  POSIX getopt() signals end-of-options with -1,
     not EOF (they coincide on most systems, but -1 is the contract). */
  while((opt = getopt(argc, argv, "l:r:p:Sh")) != -1) {
    switch (opt) {
    case 'l':
      read_address(optarg, &lcl_addr);
      laddr = 1;
      break;
    case 'r':
      read_address(optarg, &rmt_addr);
      if (rmt_addr.sin_addr.s_addr == INADDR_ANY) {
	fprintf(stderr, "%s: invalid remote address: %s\n", prog, optarg);
	exit(1);
      }
      raddr = 1;
      break;
    case 'p':
      num_procs = atoi(optarg);
      if (num_procs < 1) {
	fprintf(stderr, "%s: invalid number of processes: %s\n", prog, optarg);
	exit(1);
      }
      break;
    case 'S':
      /*
       * Serialization decision is tricky on some platforms. For example,
       * Solaris 2.6 and above has kernel sockets implementation, so supposedly
       * there is no need for serialization. The ST library may be compiled
       * on one OS version, but used on another, so the need for serialization
       * should be determined at run time by the application. Since it's just
       * an example, the serialization decision is left up to user.
       * Only on platforms where the serialization is never needed on any OS
       * version st_netfd_serialize_accept() is a no-op.
       */
      serialize_accept = 1;
      break;
    case 'h':
    case '?':
      fprintf(stderr, "Usage: %s -l <[host]:port> -r <host:port> "
	      "[-p <num_processes>] [-S]\n", prog);
      exit(1);
    }
  }
  if (!laddr) {
    fprintf(stderr, "%s: local address required\n", prog);
    exit(1);
  }
  if (!raddr) {
    fprintf(stderr, "%s: remote address required\n", prog);
    exit(1);
  }
  if (num_procs == 0)
    num_procs = cpu_count();

  fprintf(stderr, "%s: starting proxy daemon on %s:%d\n", prog,
	  inet_ntoa(lcl_addr.sin_addr), ntohs(lcl_addr.sin_port));

  /* Start the daemon */
  start_daemon();

  /* Initialize the ST library */
  if (st_init() < 0) {
    print_sys_error("st_init");
    exit(1);
  }

  /* Create and bind listening socket */
  if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
    print_sys_error("socket");
    exit(1);
  }
  n = 1;
  if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&n, sizeof(n)) < 0) {
    print_sys_error("setsockopt");
    exit(1);
  }
  if (bind(sock, (struct sockaddr *)&lcl_addr, sizeof(lcl_addr)) < 0) {
    print_sys_error("bind");
    exit(1);
  }
  /* listen() can fail (e.g. EADDRINUSE) -- check it like every other call */
  if (listen(sock, 128) < 0) {
    print_sys_error("listen");
    exit(1);
  }
  if ((srv_nfd = st_netfd_open_socket(sock)) == NULL) {
    print_sys_error("st_netfd_open");
    exit(1);
  }
  /* See the comment regarding serialization decision above */
  if (num_procs > 1 && serialize_accept && st_netfd_serialize_accept(srv_nfd)
      < 0) {
    print_sys_error("st_netfd_serialize_accept");
    exit(1);
  }

  /* Start server processes */
  set_concurrency(num_procs);

  for ( ; ; ) {
    n = sizeof(cli_addr);
    cli_nfd = st_accept(srv_nfd, (struct sockaddr *)&cli_addr, &n, -1);
    if (cli_nfd == NULL) {
      print_sys_error("st_accept");
      exit(1);
    }
    /* One ST thread per client; handle_request owns cli_nfd from here */
    if (st_thread_create(handle_request, cli_nfd, 0, 0) == NULL) {
      print_sys_error("st_thread_create");
      exit(1);
    }
  }

  /* NOTREACHED */
  return 1;
}
 // Destructor: request a pool size of zero so every worker thread exits.
 // NOTE(review): assumes set_concurrency(0) also joins/waits for the workers
 // before returning -- confirm in its definition (not visible in this chunk).
 ~dfs_task_manager()
 {
     set_concurrency(0);
 }