Example #1
void *
threadmain(void *args)
{
	int	i, idx;

	testfunc_t	func;
	threaddata_t	*td = (threaddata_t *) args;

	TEST_SRAND(((int)TIME()) * td->tid);

	thread_barrier();

	if (!threadstress) MSG("tid=%3d> starting.", td->tid);

	for (i = 0; i < iters; i++) {
		idx = TEST_RAND(0,functions_num-1);
		func = test_functions[idx];
		assert(func != NULL);

		func(td);
		if (td->ltid == 0 && !threadstress) TEST_PROGRESS_BAR(i, iters);
	}

	thread_barrier();
	if (!threadstress) MSG("tid=%3d> done.", td->tid);

	return NULL;
}
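None of the examples in this listing show the definition of the no-argument thread_barrier() they call. A minimal sketch of one way to provide it, assuming a process-global POSIX barrier that the main thread initializes once with the participant count (the names g_barrier and thread_barrier_init are illustrative, not from the original projects):

#include <pthread.h>

static pthread_barrier_t g_barrier;	/* shared by all participating threads */

/* call once from the main thread before spawning workers */
void
thread_barrier_init(unsigned nthreads)
{
	pthread_barrier_init(&g_barrier, NULL, nthreads);
}

/* block until every participant has arrived */
void
thread_barrier(void)
{
	pthread_barrier_wait(&g_barrier);
}

Example #1 hits the barrier twice: once so all workers start iterating together, and once so the "done" message only appears after every worker has finished.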
Example #2
int
ACE_TMAIN(int argc, ACE_TCHAR *argv[])
{
  ACE_Get_Opt get_opts (argc, argv, ACE_TEXT("s:c:"));
  int c = -1;
  const ACE_TCHAR *client_cmd = 0;

  while ((c = get_opts ()) != -1)
    switch (c)
      {
      case 'c':
        client_cmd = get_opts.opt_arg ();
        ACE_DEBUG ((LM_DEBUG, "Client argument: %s\n", client_cmd));
        break;
      case 's':
        server_cmd = get_opts.opt_arg ();
        ACE_DEBUG ((LM_DEBUG, "Server argument: %s\n", server_cmd));
        break;
      default:
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Usage: collocation_test -s \"server opts\" -c \"client opts\""),
                          -1);
      }

  ACE_TCHAR cmd_line[1024];
  ACE_OS::strcpy (cmd_line, ACE_TEXT("client "));
  if (client_cmd != 0)
    ACE_OS::strcat (cmd_line, client_cmd);
  ACE_OS::strcat (cmd_line, ACE_TEXT(" -f ") THE_IOR);
  ACE_ARGV args (cmd_line);

  Barriers thread_barrier (2);

  int retv = 1;

  ACE_DEBUG ((LM_DEBUG,
              "\n \t IDL_Cubit: Collocation test \n\n"));

  ACE_Thread_Manager tm;
  tm.spawn (reinterpret_cast<ACE_THR_FUNC> (&svr_worker),
            &thread_barrier);
  thread_barrier.server_init_.wait ();
  ACE_OS::sleep (1);

  Cubit_Client cubit_client (1);
  // Make sure the server shuts itself down afterward.

  if (cubit_client.init (args.argc (), args.argv ()) == -1)
    return 1;
  else
    retv = cubit_client.run ();

  thread_barrier.client_fini_.wait ();
  tm.wait ();

  ACE_OS::unlink (THE_IOR);
  return retv;
}
Example #3
File: qsort.c Project: Hkau/kth
void addsort(int *v, int first, int last)
{
	// thread pool stuff
	pthread_mutex_lock(&order_lock);

	// find a thread that can take a beating (more work)
	struct qsthread *thread = NULL;

	// if one's free, that's our thread
	if(num_free_threads != 0)
	{
		thread = free_thread_pool[--num_free_threads];
	}
	// else, if we're able to spawn a new one, do it
	else if(spawned_threads < MAX_THREADS)
	{
		thread_pool[spawned_threads] = thread_default;
		thread = &thread_pool[spawned_threads];
		pthread_create(&thread_pool[spawned_threads].thread, &thread_attr, run_thread, (void *)thread);

		thread_barrier();

		++spawned_threads;
	}

	pthread_mutex_unlock(&order_lock);

	// if there's one to give work to, assign the order to it
	if(thread != NULL)
	{
		thread->v = v;
		thread->first = first;
		thread->last = last;

		pthread_mutex_lock(&thread->wait_lock);
		pthread_cond_signal(&thread->start_signal);
		pthread_mutex_unlock(&thread->wait_lock);
	}
	else
		// no more threads available, work yourself.
		psort(v, first, last);
}
Example #4
File: qsort.c Project: Hkau/kth
// thread starting point
void *run_thread(void *thread_ptr)
{
	struct qsthread *thread = (struct qsthread *)thread_ptr;

	pthread_mutex_lock(&thread->wait_lock);

	// let spawning thread know that we've started ok.
	thread_barrier();

	while(1)
	{
		pthread_cond_wait(&thread->start_signal, &thread->wait_lock);

		// NULL list means main thread wants us to quit
		if(thread->v == NULL)
			pthread_exit(NULL);
		// otherwise, sort thread
		psort(thread->v, thread->first, thread->last);

		pthread_mutex_lock(&order_lock);

		// if finishing this thread's current job leaves every spawned
		// thread idle, all sorting is done: hand each idle worker a
		// NULL job so it exits, then leave the loop ourselves
		if(num_free_threads == spawned_threads-1)
		{
			size_t i = 0;
			for(i = 0; i < num_free_threads; ++i)
			{
				free_thread_pool[i]->v = NULL;
				pthread_cond_signal(&free_thread_pool[i]->start_signal);
			}
			pthread_mutex_unlock(&order_lock);
			pthread_mutex_unlock(&thread->wait_lock);
			break;
		}
		// otherwise add thread back to pool and be available for more work
		free_thread_pool[num_free_threads++] = thread;
		pthread_mutex_unlock(&order_lock);
	}
	return NULL;
}
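The listing does not show how the main thread tears the pool down. A hypothetical helper (stop_thread is a made-up name) that follows the same hand-off protocol as addsort(), using the NULL job that run_thread() interprets as a quit request:

/* Hypothetical shutdown helper: hand the worker a NULL job, which
 * run_thread() above treats as a request to exit. */
void stop_thread(struct qsthread *thread)
{
	pthread_mutex_lock(&thread->wait_lock);
	thread->v = NULL;
	pthread_cond_signal(&thread->start_signal);
	pthread_mutex_unlock(&thread->wait_lock);
}

Acquiring wait_lock before signaling matters here: the worker only releases that mutex while parked in pthread_cond_wait(), so holding it guarantees the signal cannot fire before the worker is actually waiting.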
Example #5
File: manual_event.cpp Project: CCJY/ACE
int
ACE_TMAIN (int argc, ACE_TCHAR **argv)
{
  int n_threads = argc == 2 ? ACE_OS::atoi (argv[1]) : 5;

  ACE_Thread_Manager &tm = *ACE_Thread_Manager::instance ();

  // synch object shared by all threads
  Pseudo_Barrier thread_barrier (n_threads);

  // create workers
  if (tm.spawn_n (n_threads, (ACE_THR_FUNC) worker, &thread_barrier) == -1)
    ACE_ERROR_RETURN ((LM_ERROR, "thread creates for worker failed"), -1);

  // wait for all workers to exit
  if (tm.wait () == -1)
    ACE_ERROR_RETURN ((LM_ERROR, "thread wait failed"), -1);
  else
    ACE_DEBUG ((LM_DEBUG, "graceful exit\n"));

  return 0;
}
Example #6
void* main_thrd_work(void* thrd_args) {
  thrd_args_t *my_data = (thrd_args_t*) thrd_args;
  int t = my_data->id;
  Matrix images = my_data->images;
  int chunk = images->row_dim / NUM_THREADS;
  int remainder = images->row_dim % NUM_THREADS;
  Subspace *s = my_data->s;
  int start;
  int end;

  start = (t==0) ? 0 : t*chunk;
  start += (t <= remainder) ? t:remainder;
  end = (t < remainder)? 1:0;
  end += start + chunk;

  /*printf("thread %d sub start %d end %d\n",t,start,end);*/
  thrd_mean_subtract_images(images, s->mean, start, end);

  thread_barrier(t, thrd_barrier);

  /* the mean subtract is in synch */

  chunk = s->basis->col_dim / NUM_THREADS;
  remainder = s->basis->col_dim % NUM_THREADS;

  start = (t==0) ? 0 : t*chunk;
  start += (t <= remainder) ? t:remainder;
  end = (t < remainder)? 1:0;
  end += start + chunk;
  
  /*printf("thread %d mul start %d end %d\n",t,start,end);*/
  thrd_transposeMultiplyMatrixL(s->basis, images, my_data->subspims,
				start, end);

  return NULL;
}
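The start/end arithmetic above is a standard block partition: every thread gets row_dim / NUM_THREADS rows, and the first row_dim % NUM_THREADS threads take one extra. A self-contained sketch of the same computation (the helper name thread_range is made up), worked through for 10 rows over 4 threads:

#include <stdio.h>

/* same partition as above: thread t gets [start, end) out of n items,
 * with the first (n % nthreads) threads taking one extra item */
static void thread_range(int t, int nthreads, int n, int *start, int *end)
{
  int chunk = n / nthreads;
  int remainder = n % nthreads;
  *start = t * chunk + (t <= remainder ? t : remainder);
  *end = *start + chunk + (t < remainder ? 1 : 0);
}

int main(void)
{
  /* prints [0,3) [3,6) [6,8) [8,10): 3+3+2+2 = 10 rows, no gaps */
  for (int t = 0; t < 4; ++t) {
    int s, e;
    thread_range(t, 4, 10, &s, &e);
    printf("thread %d: [%d,%d)\n", t, s, e);
  }
  return 0;
}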
Example #7
File: client.cpp Project: OspreyHub/ATCD
int
Task::svc (void)
{
  try
    {
      // Priority Mapping Manager.
      CORBA::Object_var object =
        this->orb_->resolve_initial_references ("PriorityMappingManager");
      RTCORBA::PriorityMappingManager_var mapping_manager =
        RTCORBA::PriorityMappingManager::_narrow (object.in ());
      if (check_for_nil (mapping_manager.in (), "Mapping Manager") == -1)
        return -1;

      RTCORBA::PriorityMapping *pm =
        mapping_manager->mapping ();

      // RTCurrent.
      object =
        this->orb_->resolve_initial_references ("RTCurrent");
      RTCORBA::Current_var current =
        RTCORBA::Current::_narrow (object.in ());
      if (check_for_nil (current.in (), "RTCurrent") == -1)
        return -1;

      // Obtain Test object reference.
      object =
        this->orb_->string_to_object (ior);
      Test_var server = Test::_narrow (object.in ());
      if (check_for_nil (server.in (), "Test object") == -1)
        return -1;

      // Check that test object is configured with CLIENT_PROPAGATED
      // PriorityModelPolicy.
      CORBA::Policy_var policy =
        server->_get_policy (RTCORBA::PRIORITY_MODEL_POLICY_TYPE);

      RTCORBA::PriorityModelPolicy_var priority_policy =
        RTCORBA::PriorityModelPolicy::_narrow (policy.in ());

      if (check_for_nil (priority_policy.in (), "PriorityModelPolicy") == -1)
        return -1;

      RTCORBA::PriorityModel priority_model =
        priority_policy->priority_model ();
      if (priority_model != RTCORBA::CLIENT_PROPAGATED)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "ERROR: priority_model != "
                           "RTCORBA::CLIENT_PROPAGATED!\n"),
                          -1);

      // Spawn two worker threads.
      ACE_Barrier thread_barrier (2);
      int flags  =
        THR_NEW_LWP |
        THR_JOINABLE |
        this->orb_->orb_core ()->orb_params ()->thread_creation_flags ();

      // Worker 1.
      Worker_Thread worker1 (this->orb_.in (),
                             server.in (),
                             protocol1,
                             &thread_barrier);

      CORBA::Short native_priority1 = 0;
      if (pm->to_native (priority1, native_priority1) == 0)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Cannot convert corba priority %d to native priority\n",
                           priority1),
                          -1);

      if (worker1.activate (flags,
                            1, 0,
                            native_priority1) != 0)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Cannot activate first client worker threads\n"),
                          -1);

      // Worker 2.
      Worker_Thread worker2 (this->orb_.in (),
                             server.in (),
                             protocol2,
                             &thread_barrier);

      CORBA::Short native_priority2 = 0;
      if (pm->to_native (priority2, native_priority2) == 0)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Cannot convert corba priority %d to native priority\n",
                           priority2),
                          -1);

      if (worker2.activate (flags,
                            1, 0,
                            native_priority2) != 0)
        ACE_ERROR_RETURN ((LM_ERROR,
                           "Cannot activate second client worker threads\n"),
                          -1);

      // Wait for worker threads to finish.
      ACE_Thread_Manager::instance ()->wait ();

      // Testing over.  Shut down the server.
      ACE_DEBUG ((LM_DEBUG, "Client threads finished\n"));
      current->the_priority (priority1);
      server->shutdown ();
    }
  catch (const CORBA::Exception& ex)
    {
      ex._tao_print_exception (
        "Unexpected exception in MT_Client_Protocol_Priority test client:");
      return -1;
    }

  return 0;
}
Example #8
/** Main program. Here the actual usage of mylib is shown. */
int main(int argc, char **argv)
{
  int N = 10, num_threads = 4;
  mylib_ThreadFactory tfactory;
  Barrier thread_barrier(num_threads);
  std::vector<mylib_ThreadControl> tcontrol(num_threads);
  std::vector<std::thread> threads(num_threads);
  std::vector<ArgumentT> args(num_threads);

  /* Create global thread manager and register C++11 thread synchronization routine.
   * The following three lines could be merged into a single line by providing a convenience routine
   * mylib_ThreadFactory_create_cpp11threads(...);
   */
  mylib_ThreadFactory_create(&tfactory);
  tfactory->sync = cpp11thread_sync;
  tfactory->sync_data = (void*)&thread_barrier;

  /* Create vectors with data. */
  std::vector<double> v1(N);
  std::vector<double> v2(N);
  std::vector<double> v3(N);

  /* Set entries in v1 and v2 */
  for (int i=0; i<N; ++i)
  {
    v1[i] = i;
    v2[i] = N - i;
  }

  /*
   *  First operation: Add entries.
   *  Generate a per-thread thread control, wrap arguments, and launch thread.
   */
  for (int i=0; i<num_threads; ++i)
  {
    mylib_ThreadFactory_create_control(tfactory, tcontrol.data() + i);
    tcontrol[i]->tid   = i;
    tcontrol[i]->tsize = num_threads;

    /* Note: I could not find a way of passing 'mylib_vector_add' and arguments directly to std::thread(), hence this pthread-like workaround */
    args[i].tcontrol = tcontrol[i];
    args[i].v1 = v1.data();
    args[i].v2 = v2.data();
    args[i].v3 = v3.data();
    args[i].N  = N;
    threads[i] = std::thread(threaded_add, (void*)&args[i]);
  }

  /** Wait for threads to complete. Clean up thread control object. */
  for (int i=0; i<num_threads; ++i)
  {
    threads[i].join();
    mylib_ThreadFactory_destroy_control(tfactory, tcontrol[i]);
  }

  std::cout << "Result of vector addition: ";
  for (int i = 0; i<N; ++i)
    std::cout << v3[i] << " ";
  std::cout << std::endl;



  /*
   *  Second operation: Compute dot product.
   *  Same control flow as before: Create thread control object, wrap function arguments, launch thread.
   */
  for (int i=0; i<num_threads; ++i)
  {
    mylib_ThreadFactory_create_control(tfactory, tcontrol.data() + i);
    tcontrol[i]->tid   = i;
    tcontrol[i]->tsize = num_threads;

    /* Note: same pthread-style workaround as above for passing the worker function and its arguments to std::thread() */
    args[i].tcontrol = tcontrol[i];
    args[i].v1 = v1.data();
    args[i].v2 = v2.data();
    args[i].v3 = v3.data();
    args[i].N  = N;
    threads[i] = std::thread(threaded_dot, (void*)&args[i]);
  }

  /** Wait for threads to complete. Clean up thread control object. */
  for (int i=0; i<num_threads; ++i)
  {
    threads[i].join();
    mylib_ThreadFactory_destroy_control(tfactory, tcontrol[i]);
  }

  std::cout << "Result of dot product: " << v3[0] << std::endl;

  /* Clean up */
  mylib_ThreadFactory_destroy(tfactory);

  return EXIT_SUCCESS;
}
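The entry points threaded_add and threaded_dot launched above are not part of the listing. Given the note that the workaround wraps 'mylib_vector_add', a plausible shape for threaded_add is sketched below; the kernel's exact signature is an assumption, not something documented here:

/* Hypothetical entry point: unpack the argument struct and forward to the
 * mylib kernel (whose signature is assumed, not shown in the listing). */
void *threaded_add(void *argument)
{
  ArgumentT *a = (ArgumentT *)argument;
  mylib_vector_add(a->tcontrol, a->v1, a->v2, a->v3, a->N);
  return NULL;
}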
Example #9
void thrd_subvq_gautbl_eval_logs3 (int subvec_num, subvq_t *vq, float32 *feat)
{

  if (NUM_THREADS == 1) {
    int32 s, i;
    int32 *featdim;
    
    for (s=0; s < vq->n_sv; s++) {
      /* Extract subvector from feat */
      featdim = vq->featdim[s];
      for (i = 0; i < vq->gautbl[s].veclen; i++) {
	vq->subvec[i] = feat[featdim[i]];
      }

      if (s < VQ_EVAL)
	/* Evaluate distances between extracted subvector and corresponding codebook */
	
	/* RAH, only evaluate the first VQ_EVAL set of features */
	vector_gautbl_eval_logs3(&(vq->gautbl[s]), 0, vq->vqsize, vq->subvec, vq->vqdist[s]);
    }
    
  } else { /* NUM_THREADS > 1 */


  int32 s, i;
  int32 *featdim;
  const int32 vqsize = vq->vqsize;
  const int32 n_sv = vq->n_sv;
  const int32 remainder = (vqsize * n_sv)%NUM_THREADS;
  int32 chunk_size = (vqsize * n_sv)/NUM_THREADS;
  int32 offset;
  int32 subv_ex;
  
  s = subvec_num*chunk_size; /* subvec_num is now the thread no. */
  /* the first `remainder` threads each take one extra element, so later
     threads must also skip past those extras */
  if (remainder > subvec_num) { s += subvec_num; chunk_size++; }
  else s += remainder;

  offset = s % vqsize;
  s /= vqsize; /* determine which vector to process */
  if (DEBUG&0x4) fprintf(stderr,"thrd %d chunk_size %d offset %d s %d\n",
			 subvec_num,chunk_size, offset, s);

  assert(s<n_sv && "subvec index out of range!!\n");

#if (NUM_THREADS>1)
  pthread_mutex_lock(&update_lock);

  subv_ex = (sema++)%NUM_THREADS;

  pthread_mutex_unlock(&update_lock);
#else
  subv_ex = (sema++)%NUM_THREADS;
#endif

  if (subv_ex < n_sv) {
    if (DEBUG&0x4) 
      fprintf(stderr,"thread %d subvec %d veclen %d\n",subvec_num,subv_ex,
	      vq->gautbl[subv_ex].veclen);
    /* Extract subvector from feat */
    featdim = vq->featdim[subv_ex];
    for (i = 0; i < vq->gautbl[subv_ex].veclen; i++)
      vq->thrd_subvec[subv_ex][i] = feat[featdim[i]];
  }

  if (DEBUG&0x4) fprintf(stderr,"thread %d before barrier \n",subvec_num);
  
  thread_barrier(subvec_num,score_barrier);

  if (DEBUG&0x4) fprintf(stderr,"thread %d after barrier \n",subvec_num);

  /* Evaluate distances between extracted subvector and corresponding codebook */
  
  /* RAH, only evaluate the first VQ_EVAL set of features */
  if (s < VQ_EVAL) {
    if (offset+chunk_size > vqsize) {
      if (DEBUG&0x4) fprintf(stderr,"thread %d subvec_eval %d offset %d size %d\n",
			     subvec_num,s,offset,vqsize-offset);
      vector_gautbl_eval_logs3(&(vq->gautbl[s]), offset, vqsize-offset, vq->thrd_subvec[s], vq->vqdist[s]);
      chunk_size -= (vqsize-offset);
    } else {
      if (DEBUG&0x4) fprintf(stderr,"thread %d subvec_eval %d offset %d size %d\n",
			     subvec_num,s,offset,chunk_size);
      vector_gautbl_eval_logs3(&(vq->gautbl[s]), offset, chunk_size, vq->thrd_subvec[s], vq->vqdist[s]);
      chunk_size = 0;
    }
    
    while (chunk_size>0) {
      s++;
      if (chunk_size > vqsize) {
	if (DEBUG&0x4) fprintf(stderr,"thread %d subvec_eval %d offset %d size %d\n",
			       subvec_num,s,0,vqsize);
	vector_gautbl_eval_logs3(&(vq->gautbl[s]), 0, vqsize, vq->thrd_subvec[s], vq->vqdist[s]);
	chunk_size-=vqsize;
      } else {
	if (DEBUG&0x4) fprintf(stderr,"thread %d subvec_eval %d offset %d size %d\n",
			       subvec_num,s,0,chunk_size);
	vector_gautbl_eval_logs3(&(vq->gautbl[s]), 0, chunk_size, vq->thrd_subvec[s], vq->vqdist[s]);
	chunk_size=0;
      }

    }
  }

  thread_barrier(subvec_num,score_barrier);

  }
}
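The two-argument thread_barrier(tid, barrier) used here (and with thrd_barrier in Example #6) is also not included in the listing. A minimal counting-barrier sketch under that assumption; the struct layout and all names below are hypothetical:

#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  all_arrived;
    int             nthreads;    /* participants per cycle */
    int             waiting;     /* arrivals so far in this cycle */
    int             generation;  /* cycle counter, prevents lost wakeups */
} barrier_t;

void thread_barrier(int tid, barrier_t *b)
{
    (void) tid;  /* the call sites pass a thread id; unused in this sketch */
    pthread_mutex_lock(&b->lock);
    int gen = b->generation;
    if (++b->waiting == b->nthreads) {
        /* last arrival: reset the count and release everyone */
        b->waiting = 0;
        b->generation++;
        pthread_cond_broadcast(&b->all_arrived);
    } else {
        while (gen == b->generation)
            pthread_cond_wait(&b->all_arrived, &b->lock);
    }
    pthread_mutex_unlock(&b->lock);
}

The generation counter is what makes the barrier reusable across both barrier points above: a thread released from one cycle cannot race back in and consume a wakeup meant for the previous cycle.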
Example #10
int
run_main (int argc, ACE_TCHAR *argv[])
{
  ACE_START_TEST (ACE_TEXT ("Upgradable_RW_Test"));
  int status = 0;

#if defined (ACE_HAS_THREADS)
  parse_args (argc, argv);
#if !defined (RW_MUTEX)
  use_try_upgrade = 0;
  // make sure that we have to acquire the write lock
#endif /* RW_MUTEX */

  current_readers = 0; // Possibly already done
  current_writers = 0; // Possibly already done

  init ();

  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT (" (%t) main thread starting\n")));

  Time_Calculation time_Calculation;
  // for the time calculation

  ACE_Barrier thread_barrier (n_readers + n_writers);
  // for a nice start of all threads (for much contention)

  // Initialize the readers.
  Reader_Task **reader_tasks = 0;

  ACE_NEW_RETURN (reader_tasks,
                  Reader_Task *[n_readers],
                  -1);
  u_int i = 0;

  for (i = 0;
       i < n_readers;
       i++)
    {
      ACE_NEW_RETURN (reader_tasks[i],
                      Reader_Task (time_Calculation,
                                  thread_barrier),
                      -1);

      reader_tasks[i]->activate (thr_flags,
                                 1,
                                 0,
                                 ACE_DEFAULT_THREAD_PRIORITY);
    }

  // Create all the writers
  Writer_Task **writer_tasks = 0;

  ACE_NEW_RETURN (writer_tasks,
                  Writer_Task *[n_writers],
                  -1);

  for (i = 0;
       i < n_writers;
       i++)
    {
      ACE_NEW_RETURN (writer_tasks[i],
                      Writer_Task (time_Calculation,
                                  thread_barrier),
                      -1);

      writer_tasks[i]->activate (thr_flags,
                                 1,
                                 0,
                                 ACE_DEFAULT_THREAD_PRIORITY);
    }

  // Wait a maximum of 1 second per iteration.
  const ACE_Time_Value max_wait (n_iterations * 1);
  const ACE_Time_Value wait_time (ACE_OS::gettimeofday () + max_wait);
  if (ACE_Thread_Manager::instance ()->wait (&wait_time) == -1)
    {
      if (errno == ETIME)
        ACE_ERROR ((LM_ERROR,
                    ACE_TEXT ("maximum wait time of %d msec exceeded\n"),
                               max_wait.msec ()));
      else
        ACE_OS::perror (ACE_TEXT ("wait"));

      status = -1;
    }

  // compute average time.
  time_Calculation.print_stats ();

  if (not_upgraded != 0 || upgraded != 0)
    ACE_DEBUG ((LM_DEBUG,
                ACE_TEXT ("upgraded / (upgraded + not_upgraded) = %f\n"),
                (float) upgraded / (float) (not_upgraded + upgraded)));

  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT ("Number of times, that find was called: %d\n"),
              find_called));


  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT (" (%t) exiting main thread\n")));

  // Delete the memory of the Double_Linked_List
  ACE_CString *cString_ptr = 0;
  Element *element_ptr = 0;

  for (i = 0;
       i < n_entries;
       i++)
    {
      if (0 != (element_ptr = linked_list_ptr->delete_head ()))
        {
          cString_ptr = element_ptr->value ();
          delete cString_ptr;
          delete element_ptr;
        }
    }

  delete linked_list_ptr;

  for (i = 0;
       i < n_writers;
       i++)
    delete writer_tasks[i];

  delete [] writer_tasks;

  for (i = 0;
       i < n_readers;
       i++)
    delete reader_tasks [i];

  delete [] reader_tasks;
#else
  ACE_UNUSED_ARG (argc);
  ACE_UNUSED_ARG (argv);
  ACE_ERROR ((LM_INFO,
              ACE_TEXT ("threads not supported on this platform\n")));
#endif /* ACE_HAS_THREADS */

  ACE_END_TEST;
  return status;
}