int main() {
	pthread_t id[N];
	parametry param[N];
	int i;

	srand(time(NULL));

	printf("pocz±tek programu, uruchomianie zostanie %d w±tków\n", N);

	/* create the threads */
	for (i=0; i < N; i++) {
		param[i].id = i;
		param[i].n = rand() % 100000000 + 1;

		errno = pthread_create(&id[i], NULL, watek, &param[i]);
		test_errno("pthread_create");
	}

	/* status roughly at the halfway point */
	sleep(1);
	puts("after about a second the threads have used:");
	for (i=0; i < N; i++)
		printf("* #%d: %ldms\n", i, get_thread_time(id[i]));

	/* wait for the threads to finish */
	for (i=0; i < N; i++) {
		errno = pthread_join(id[i], NULL);
		test_errno("pthread_join");
	}

	/* and a final summary */
	puts("");
	printf("the main thread used %ldms of CPU time\n", clock_ms(CLOCK_THREAD_CPUTIME_ID));
	printf("the process used %ldms of CPU time\n", clock_ms(CLOCK_PROCESS_CPUTIME_ID));

	return EXIT_SUCCESS;
}
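
The helpers this example calls (test_errno, clock_ms, get_thread_time) are not shown above. Below is a minimal sketch of what they might look like, assuming pthread_getcpuclockid() and clock_gettime() are available; the real definitions may differ.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Abort with a message when the previous call reported an error via errno. */
static void test_errno(const char *msg) {
	if (errno != 0) {
		perror(msg);
		exit(EXIT_FAILURE);
	}
}

/* Read the given clock and return its value in milliseconds. */
static long clock_ms(clockid_t clk) {
	struct timespec ts;
	clock_gettime(clk, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* CPU time used so far by another thread, in milliseconds (assumed helper). */
static long get_thread_time(pthread_t tid) {
	clockid_t clk;
	struct timespec ts;
	if (pthread_getcpuclockid(tid, &clk) != 0 || clock_gettime(clk, &ts) != 0)
		return -1;
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}
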
Example #2
/* What each thread is doing
*
* In principle this is an endless loop. The loop is only interrupted when
* thpool_destroy() is invoked or the program exits.
*
* @param  thread        thread that will run this function
* @return nothing
*/
static void* thread_do(Thread* thread) {
    float elapsed;
    int info;
    struct timespec cputime;
    JobQueue* queue;
    WorkGroup* workGroup;
    Job* job;
    thpool_function_type func_buff;
    void* arg_buff;
    int i;

    /* Set thread name for profiling and debugging */
    char thread_name[128] = {0};
    snprintf(thread_name, sizeof(thread_name), "thread-pool-%d", thread->id);

#if defined(__linux__)
    /* Use prctl instead of pthread_setname_np to avoid requiring _GNU_SOURCE and an implicit declaration */
    prctl(PR_SET_NAME, thread_name);
#elif defined(__APPLE__) && defined(__MACH__)
    pthread_setname_np(thread_name);
#else
    fprintf(stderr, "thread_do(): pthread_setname_np is not supported on this system");
#endif

    /* Make sure all threads have been created before they start serving */
    ThPool* thpool = thread->thpool;

    /* Mark thread as alive (initialized) */
    pthread_mutex_lock(&thpool->thcount_lock);
    thpool->num_threads_alive += 1;
    pthread_mutex_unlock(&thpool->thcount_lock);

    queue = thpool->jobqueue;

    while (thpool->threads_keepalive) {

        bsem_wait(queue->has_jobs);

        if (!thpool->threads_keepalive) {
            break;
        }

        pthread_mutex_lock(&thpool->thcount_lock);
        thpool->num_threads_working++;
        pthread_mutex_unlock(&thpool->thcount_lock);

        while (thpool->threads_keepalive) {
            /* Read job from queue and execute it */
            pthread_mutex_lock(&queue->rwmutex);
            workGroup = jobqueue_pull(thpool, thread->id);
            pthread_mutex_unlock(&queue->rwmutex);

            if (workGroup == NULL)
                break;

            if (cppadcg_pool_verbose) {
                get_monotonic_time2(&workGroup->startTime);
            }

            for (i = 0; i < workGroup->size; ++i) {
                job = &workGroup->jobs[i];

                if (cppadcg_pool_verbose) {
                    get_monotonic_time2(&job->startTime);
                }

                int do_benchmark = job->elapsed != NULL;
                if (do_benchmark) {
                    elapsed = -get_thread_time(&cputime, &info);
                }

                /* Execute the job */
                func_buff = job->function;
                arg_buff = job->arg;
                func_buff(arg_buff);

                if (do_benchmark && info == 0) {
                    elapsed += get_thread_time(&cputime, &info);
                    if (info == 0) {
                        (*job->elapsed) = elapsed;
                    }
                }

                if (cppadcg_pool_verbose) {
                    get_monotonic_time2(&job->endTime);
                }
            }

            if (cppadcg_pool_verbose) {
                get_monotonic_time2(&workGroup->endTime);

                if (thread->processed_groups == NULL) {
                    thread->processed_groups = workGroup;
                } else {
                    workGroup->prev = thread->processed_groups;
                    thread->processed_groups = workGroup;
                }
            } else {
                free(workGroup->jobs);
                free(workGroup);
            }
        }

        pthread_mutex_lock(&thpool->thcount_lock);
        thpool->num_threads_working--;
        if (!thpool->num_threads_working) {
            pthread_cond_signal(&thpool->threads_all_idle);
        }
        pthread_mutex_unlock(&thpool->thcount_lock);
    }

    pthread_mutex_lock(&thpool->thcount_lock);
    thpool->num_threads_alive--;
    pthread_mutex_unlock(&thpool->thcount_lock);

    return NULL;
}
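
thread_do() benchmarks each job by sampling get_thread_time() before and after func_buff(arg_buff) and storing the difference in job->elapsed. That helper is not shown here; a minimal sketch, assuming it simply wraps clock_gettime() on the calling thread's CPU clock and reports failure through info, could be:

#include <time.h>

/* Calling thread's CPU time in seconds; info is set to 0 on success and
 * non-zero on failure, matching the "info == 0" checks above (assumed helper). */
static float get_thread_time(struct timespec* cputime, int* info) {
    *info = clock_gettime(CLOCK_THREAD_CPUTIME_ID, cputime);
    if (*info != 0)
        return 0.0f;
    return (float)cputime->tv_sec + (float)cputime->tv_nsec * 1e-9f;
}
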
Example #3
File: router.c  Project: amohtasham/rstm
/* =============================================================================
 * router_solve
 * =============================================================================
 */
void
router_solve (void* argPtr)
{
  TM_THREAD_ENTER();

  long threadId = thread_getId();

  router_solve_arg_t* routerArgPtr = (router_solve_arg_t*)argPtr;
  router_t* routerPtr = routerArgPtr->routerPtr;
  maze_t* mazePtr = routerArgPtr->mazePtr;  
  long* numPathArray = routerArgPtr->numPathArray;
  vector_t* myPathVectorPtr = PVECTOR_ALLOC(1);
  assert(myPathVectorPtr);

  queue_t* workQueuePtr = mazePtr->workQueuePtr;
  grid_t* gridPtr = mazePtr->gridPtr;
  grid_t* myGridPtr =
    PGRID_ALLOC(gridPtr->width, gridPtr->height, gridPtr->depth);
  assert(myGridPtr);
  long bendCost = routerPtr->bendCost;
  queue_t* myExpansionQueuePtr = PQUEUE_ALLOC(-1);

  long numPath = 0;
  /*
   * Iterate over work list to route each path. This involves an
   * 'expansion' and 'traceback' phase for each source/destination pair.
   */
  while ((global_timedExecution && !global_isTerminated) || (!global_timedExecution)) {
  //while (1) {
    wait_for_turn(threadId);
    if (global_timedExecution && global_isTerminated)
        break;

    ulong_t beginTime;
    pair_t* coordinatePairPtr;
    TM_BEGIN();
    beginTime = get_thread_time();
    if (TMQUEUE_ISEMPTY(workQueuePtr)) {
        coordinatePairPtr = NULL;
    } else {
        coordinatePairPtr = (pair_t*)TMQUEUE_POP(workQueuePtr);
    }
    TM_END();
    //add_throughput(threadId , get_thread_time() - beginTime);
    if (coordinatePairPtr == NULL) {
      break;
    }

    coordinate_t* srcPtr = (coordinate_t*)coordinatePairPtr->firstPtr;
    coordinate_t* dstPtr = (coordinate_t*)coordinatePairPtr->secondPtr;

    bool_t success = FALSE;
    vector_t* pointVectorPtr = NULL;

    TM_BEGIN();
    beginTime = get_thread_time();
    grid_copy(myGridPtr, gridPtr); /* ok if not most up-to-date */
    if (PdoExpansion(routerPtr, myGridPtr, myExpansionQueuePtr,
                     srcPtr, dstPtr)) {
      pointVectorPtr = PdoTraceback(gridPtr, myGridPtr, dstPtr, bendCost);
      /*
       * TODO: fix memory leak
       *
       * pointVectorPtr will be a memory leak if we abort this transaction
       */
      if (pointVectorPtr) {
        TMGRID_ADDPATH(gridPtr, pointVectorPtr);
        TM_LOCAL_WRITE_L(success, TRUE);
      }
    }
    TM_END();
    add_throughput(threadId, get_thread_time() - beginTime);

    numPath++;
    if (success) {
      bool_t status = PVECTOR_PUSHBACK(myPathVectorPtr,
                                       (void*)pointVectorPtr);
      assert(status);
    }

  }
  numPathArray[threadId] = numPath;
  /*
   * Add my paths to global list
   */
  list_t* pathVectorListPtr = routerArgPtr->pathVectorListPtr;
  TM_BEGIN();
  TMLIST_INSERT(pathVectorListPtr, (void*)myPathVectorPtr);
  TM_END();

  PGRID_FREE(myGridPtr);
  PQUEUE_FREE(myExpansionQueuePtr);

#if DEBUG
  puts("\nFinal Grid:");
  grid_print(gridPtr);
#endif /* DEBUG */

  TM_THREAD_EXIT();
}
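
router_solve() brackets each transaction with get_thread_time() and passes the difference to add_throughput(). The definition used in amohtasham/rstm is not shown on this page; the sketch below only assumes it returns the calling thread's CPU time in microseconds via clock_gettime().

#include <time.h>

typedef unsigned long ulong_t;   /* as typedef'd in the STAMP sources (assumption) */

/* Calling thread's CPU time in microseconds; returns 0 if the clock cannot be read. */
static ulong_t get_thread_time(void) {
    struct timespec ts;
    if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0)
        return 0;
    return (ulong_t)ts.tv_sec * 1000000UL + (ulong_t)(ts.tv_nsec / 1000);
}
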