Ejemplo n.º 1
0
/* Verification start */
/* Entry point for a verification worker thread.
 * Sets up thread-local state and a reaction context, runs the worker's
 * main loop, optionally drives a cycle-search pass (OWCTY or MAP), and
 * finally tears everything down in reverse order. */
static void lmn_worker_start(void *arg)
{
  LmnWorker *w = (LmnWorker *)arg;
  LmnWorkerGroup *wp = worker_group(w);
  unsigned long id = worker_id(w);

  worker_TLS_init(id);
  mc_react_cxt_init(worker_rc(w));

  /* Only the primary worker dumps the initial state, and only when
   * dumping is enabled in the worker's flags. */
  if (id == LMN_PRIMARY_ID && mc_is_dump(worker_flags(w))) {
    StateSpaceRef ss = worker_states(w);
    dump_state_data(statespace_init_state(ss), (LmnWord)ss->out, (LmnWord)NULL);
  }

  if (lmn_env.profile_level >= 1) {
    profile_start_exec_thread();
  }
  worker_start(w);

  /* Run a secondary search phase unless the group already exited or
   * hit an error during the main run. */
  if (!workers_are_exit(wp) && !workers_have_error(wp)) {
    if (worker_use_owcty(w)) {
      owcty_start(w);
    } else if (worker_use_map(w) && !worker_use_weak_map(w)) {
      map_iteration_start(w);
    }
    /* else: ND-mode etc */
  }

  if (lmn_env.profile_level >= 1) {
    profile_finish_exec_thread();
  }
  mc_react_cxt_destroy(worker_rc(w));
  worker_TLS_finalize();
}
Ejemplo n.º 2
0
/*
 * Adds a result to the end of the result queue.
 *
 * Returns 0 on success;
 *        -1 on failure
 */
/*
 * Adds a result to the end of the result queue.
 *
 * Returns 0 on success;
 *        -1 on failure
 */
static int t_pool_add_result(t_pool_job *j, void *data) {
    t_results_queue *q = j->q;
    t_pool_result *res;

#ifdef DEBUG
    fprintf(stderr, "%d: Adding resulting to queue %p, serial %d\n",
	    worker_id(j->p), q, j->serial);
#endif

    /* No results queue is fine if we don't want any results back */
    if (q == NULL)
	return 0;

    res = malloc(sizeof(*res));
    if (res == NULL)
	return -1;

    /* Build the node before taking the lock; only the list splice
     * needs to happen under the mutex. */
    res->next = NULL;
    res->data = data;
    res->serial = j->serial;

    pthread_mutex_lock(&q->result_m);
    /* Append to the tail, or start a fresh list if it was empty. */
    if (!q->result_tail) {
	q->result_head = q->result_tail = res;
    } else {
	q->result_tail->next = res;
	q->result_tail = res;
    }
    q->queue_len++;
    q->pending--;

#ifdef DEBUG
    fprintf(stderr, "%d: Broadcasting result_avail (id %d)\n",
	    worker_id(j->p), res->serial);
#endif
    /* Wake one waiter; the signal is issued while still holding the
     * mutex, matching the original locking discipline. */
    pthread_cond_signal(&q->result_avail_c);
#ifdef DEBUG
    fprintf(stderr, "%d: Broadcast complete\n", worker_id(j->p));
#endif

    pthread_mutex_unlock(&q->result_m);

    return 0;
}
Ejemplo n.º 3
0
/* 全てのWorkerオブジェクトで同期を取り, Primary Workerが関数funcを実行する.
 * 全てのWorkerがbarrierに到達したとき処理を再開する. */
/* Synchronizes all Worker objects: once every worker has reached the
 * first barrier, the Primary Worker runs func (if given) while the
 * others stay parked; execution resumes for everyone after all workers
 * reach the second barrier. */
void lmn_workers_synchronization(LmnWorker *me, void (*func)(LmnWorker *w))
{
  LmnWorkerGroup *group = worker_group(me);

  /* First rendezvous: wait until every worker has arrived. */
  lmn_barrier_wait(&workers_synchronizer(group));
  if (func != NULL && worker_id(me) == LMN_PRIMARY_ID) {
    func(me);
  }
  /* Second rendezvous: hold everyone until the primary has finished. */
  lmn_barrier_wait(&workers_synchronizer(group));
}
Ejemplo n.º 4
0
/* 全てのWorkerオブジェクトが実行を停止している場合に真を返す. */
/* Returns TRUE when every Worker object in the group has stopped executing. */
BOOL lmn_workers_termination_detection_for_rings(LmnWorker *root)
{
  LmnWorkerGroup *wp;
  /** Overview:
   *  The workers logically form a ring, and the flag check is carried
   *  out along that ring.  Besides is_active, is_stealer and is_white
   *  are also checked:
   *    is_white  :
   *       flag for detecting an invalid termination caused by sending a
   *       task to a worker whose id is smaller than our own
   *       (a task sent to an already-checked worker would otherwise let
   *       detection conclude incorrectly).
   *       @see Dijkstra's Token Termination Detection (parallel computing)
   *    is_stealer:
   *       flag indicating a task was stolen from a different worker
   *       (a checked worker stealing from an unchecked worker would
   *       otherwise let detection conclude incorrectly).
   *
   *  NOTE: this borrows parts of Dijkstra's scheme but is NOT
   *  Dijkstra's algorithm itself.
   */

  /* The Primary Worker (id == LMN_PRIMARY_ID) writes its detection
   * result into the shared group state; every other worker only reads
   * the termination flag written there. */
  wp = worker_group(root);
  if (worker_id(root) == LMN_PRIMARY_ID && !workers_are_terminated(wp)) {
    int i, n;
    BOOL ret;

    ret = TRUE;
    n   = workers_entried_num(wp);
    /* First pass around the ring: every worker must satisfy the
     * termination condition; each worker is repainted white as it is
     * visited so a later task send can be detected. */
    for (i = 0; i < n; i++) {
      LmnWorker *w = workers_get_worker(wp, i);
      ret = ret && TERMINATION_CONDITION(w);
      worker_set_white(w);
      if (!ret) return FALSE;
    }

    /* Second pass: no worker may have stolen work since the first
     * pass; the stealer flag is cleared as each worker is checked. */
    for (i = 0; i < n; i++) {
      LmnWorker *w = workers_get_worker(wp, i);
      ret = ret && !worker_is_stealer(w);
      worker_unset_stealer(w);
      if (!ret) return FALSE;
    }
    workers_set_terminated(wp);
  }

  return workers_are_terminated(wp);
}
Ejemplo n.º 5
0
// Fills an input map with 0..99, spawns many worker threads over the shared
// input/result maps, then checks that the concurrently accumulated sums match
// the serially computed expectations.
//
// Fixes vs. the previous version: removed an unused iterator local, replaced
// an unbounded sprintf with snprintf, and widened the name buffer.
void HdividerTests::testConcurrentReadInput()
    {
        map<InputId, int > *input_data = new map<InputId, int >;
        map<int, int> *result = new map<int, int>;

        // Expected values, computed serially while filling the input map.
        int first_summ = 0;       // weighted divisor sum (2,3,5,7 buckets)
        int first_elem_summ = 0;  // plain sum of all input values (bucket 9)
        for (int i = 0; i < 100; i++)
        {
                input_data->insert(pair<InputId, int>(i , i));

                if (i % 2 == 0)
                {
                    first_summ += 2;
                }
                if (i % 3 == 0)
                {
                    first_summ += 3;
                }
                if (i % 5 == 0)
                {
                    first_summ += 5;
                }
                if (i % 7 == 0)
                {
                    first_summ += 7;
                }
                first_elem_summ += i;
        }

        (*result)[2] = (*result)[3] = (*result)[5] = (*result)[7] = (*result)[9] = 0;

        // NOTE(review): watcher/input_data/result are intentionally leaked by
        // this test; ownership is unclear from here -- confirm whether the
        // watcher or worker_func2 takes ownership before adding deletes.
        HdividerWatcher* watcher = new HdividerWatcher(new HdividerTestInputIdIt (input_data), \
                new HdividerTestStateAccessor());

        int nthreads = 1000;
        vector<pthread_t> ths;
        ths.reserve(nthreads);

        for (int i = 0; i < nthreads; i++)
        {
            pthread_t th;
            char bf[16];
            // snprintf bounds the write; sprintf into char[10] was unsafe in
            // principle (fine for i < 1000, but fragile if nthreads grows).
            snprintf(bf, sizeof(bf), "%d", i);
            string worker_id( "worker"+string(bf));

            // NOTE(review): presumably worker_func2 frees args1 -- TODO confirm.
            worker_args2 *args1 = new worker_args2(new HdividerTestWorker(watcher), \
                input_data, result, worker_id, 0);
            pthread_create(&th, NULL, worker_func2, (void*)args1);
            ths.push_back(th);
        }

        for (int i = 0; i < nthreads; i++)
        {
                pthread_join(ths[i], NULL);
        }

        int summ = 0;

        summ += (*result)[2] + (*result)[3] + (*result)[5] +(*result)[7];


        TS_ASSERT(first_summ == summ);
        cout << "first_summ: " << first_summ << endl;
        cout << "summ: " << summ << endl;

        // read test
        TS_ASSERT((*result)[9] == first_elem_summ);
        cout << "first_summ: " << first_elem_summ << endl;
        cout << "summ: " << (*result)[9] << endl;
    }
Ejemplo n.º 6
0
/*
 * A worker thread.
 *
 * Each thread waits for the pool to be non-empty.
 * As soon as this applies, one of them succeeds in getting the lock
 * and then executes the job.
 */
/*
 * A worker thread.
 *
 * Each thread waits for the pool to be non-empty.
 * As soon as this applies, one of them succeeds in getting the lock
 * and then executes the job.
 *
 * Fix vs. previous version: under DEBUG_TIME, t3 was read uninitialized
 * in the shutdown path when shutdown was observed without ever blocking
 * in the wait loop (undefined behavior); t3 is now refreshed there.
 */
static void *t_pool_worker(void *arg) {
    t_pool_worker_t *w = (t_pool_worker_t *)arg;
    t_pool *p = w->p;
    t_pool_job *j;
#ifdef DEBUG_TIME
    struct timeval t1, t2, t3;
#endif

    for (;;) {
	// Pop an item off the pool queue
#ifdef DEBUG_TIME
	gettimeofday(&t1, NULL);
#endif

	pthread_mutex_lock(&p->pool_m);

#ifdef DEBUG_TIME
	gettimeofday(&t2, NULL);
	p->wait_time += TDIFF(t2,t1);
	w->wait_time += TDIFF(t2,t1);
#endif

	// Block until a job arrives or the pool is shut down.
	while (!p->head && !p->shutdown) {
	    p->nwaiting++;

	    // Tell any drain-waiters that the pool has gone idle.
	    if (p->njobs == 0)
		pthread_cond_signal(&p->empty_c);
#ifdef DEBUG_TIME
	    gettimeofday(&t2, NULL);
#endif

#ifdef IN_ORDER
	    // Push this thread to the top of the waiting stack
	    if (p->t_stack_top == -1 || p->t_stack_top > w->idx)
		p->t_stack_top = w->idx;

	    p->t_stack[w->idx] = 1;
	    pthread_cond_wait(&w->pending_c, &p->pool_m);
	    p->t_stack[w->idx] = 0;

	    /* Find new t_stack_top: lowest idx still waiting, or -1 */
	    {
		int i;
		p->t_stack_top = -1;
		for (i = 0; i < p->tsize; i++) {
		    if (p->t_stack[i]) {
			p->t_stack_top = i;
			break;
		    }
		}
	    }
#else
	    pthread_cond_wait(&p->pending_c, &p->pool_m);
#endif

#ifdef DEBUG_TIME
	    gettimeofday(&t3, NULL);
	    p->wait_time += TDIFF(t3,t2);
	    w->wait_time += TDIFF(t3,t2);
#endif
	    p->nwaiting--;
	}

	if (p->shutdown) {
#ifdef DEBUG_TIME
	    // BUGFIX: t3 is only set inside the wait loop above; if shutdown
	    // was already pending we never waited, so refresh t3 here before
	    // reading it.
	    gettimeofday(&t3, NULL);
	    p->total_time += TDIFF(t3,t1);
#endif
#ifdef DEBUG
	    fprintf(stderr, "%d: Shutting down\n", worker_id(p));
#endif
	    pthread_mutex_unlock(&p->pool_m);
	    pthread_exit(NULL);
	}

	// Detach the head job from the queue.
	j = p->head;
	if (!(p->head = j->next))
	    p->tail = NULL;

	// Wake a producer blocked on a full queue (njobs is decremented
	// here as a side effect of the comparison).
	if (p->njobs-- >= p->qsize)
	    pthread_cond_signal(&p->full_c);

	if (p->njobs == 0)
	    pthread_cond_signal(&p->empty_c);

	pthread_mutex_unlock(&p->pool_m);

	// We have job 'j' - now execute it.
	// NOTE(review): a -1 (malloc failure) from t_pool_add_result is
	// silently dropped here, leaking the job's result -- TODO confirm
	// whether callers can tolerate a lost result or this should abort.
	t_pool_add_result(j, j->func(j->arg));
#ifdef DEBUG_TIME
	pthread_mutex_lock(&p->pool_m);
	gettimeofday(&t3, NULL);
	p->total_time += TDIFF(t3,t1);
	pthread_mutex_unlock(&p->pool_m);
#endif
	// Poison freed job memory to make use-after-free obvious.
	memset(j, 0xbb, sizeof(*j));
	free(j);
    }

    return NULL;
}