void ivar_get_with_size( void *var, struct ivar iv, int size ) { log_3("ivar_get_with_size %p %p %d - enter\n", var, &iv, size); ivar_get_helper(iv.internals); memcpy( var, iv.internals->data, size ); log_3("ivar_get_with_size %p %p %d - leave\n", var, &iv, size); }
/* Fill an ivar: allocate a private copy of the `size`-byte payload at `d`,
 * mark the ivar full, and wake every thread blocked in a get.
 *
 * NOTE(review): a second put on the same ivar leaks the previous `data`
 * allocation (it is overwritten, never freed) — presumably ivars are
 * write-once; confirm with callers. */
void ivar_put_with_size( struct ivar iv, void *d, int size )
{
    struct ivar_internals *ivi = iv.internals;

    log_3("ivar_put_with_size %p %p %d - enter\n", &iv, d, size);
    pthread_mutex_lock( &(ivi->mutex) );

    ivi->data = malloc( size );
    if( ivi->data == NULL ) {
        /* Fix: the result of malloc was used unchecked; on OOM the memcpy
         * below would dereference NULL. Readers assert(ivi->data), so dying
         * loudly here is the consistent failure mode. */
        pthread_mutex_unlock( &(ivi->mutex) );
        abort();
    }
    memcpy( ivi->data, d, size );
    ivi->full = 1;

    /* Broadcast, not signal: several getters may be blocked on one ivar. */
    pthread_cond_broadcast( &(ivi->cond) );
    pthread_mutex_unlock( &(ivi->mutex) );
    log_3("ivar_put_with_size %p %p %d - leave\n", &iv, d, size);
}
/* Create the global task pool.
 *   c - capacity of the closure ring buffer
 *   n - number of worker threads to start (also the max thread count)
 *   m - minimum number of threads kept available (see ivar_get_helper)
 */
void taskpool_init( int c, int n, int m )
{
    log_3("taskpool_init %d %d %d - enter\n", c, n, m);

    log_0("taskpool_init - allocating taskpool\n");
    feldspar_taskpool = malloc(sizeof(struct taskpool));
    if( feldspar_taskpool == NULL )
        abort();    /* fix: malloc was unchecked; every later call derefs this */

    log_1("taskpool_init - allocating %d closures\n", c);
    feldspar_taskpool->closures = malloc( c * sizeof(void*) );
    if( feldspar_taskpool->closures == NULL )
        abort();    /* fix: unchecked malloc */

    feldspar_taskpool->capacity    = c;
    feldspar_taskpool->head        = 0;   /* head == tail means "queue empty" */
    feldspar_taskpool->tail        = 0;
    feldspar_taskpool->shutdown    = 0;
    feldspar_taskpool->num_threads = n;
    feldspar_taskpool->act_threads = n;
    feldspar_taskpool->min_threads = m;
    feldspar_taskpool->max_threads = n;

    /* Fix: the mutex was only initialized when n > 0, but spawn() and
     * ivar_get_helper() lock it unconditionally — locking an uninitialized
     * mutex is undefined behavior. Initialize it always. */
    pthread_mutex_init( &(feldspar_taskpool->mutex), NULL );

    log_1("taskpool_init - starting %d threads\n", n);
    for( ; n > 0; --n ) {
        pthread_t th;
        pthread_create( &th, NULL, &worker, NULL );
        log_1("taskpool_init - thread %p created\n", &th);
    }
    log_0("taskpool_init - leave\n");
}
/* Blocking read of an ivar from a non-pool thread: wait on the ivar's
 * condition variable until it is full, then copy `size` bytes into `var`.
 * Unlike ivar_get_with_size, this never grows the task pool. */
void ivar_get_nontask_with_size( void *var, struct ivar iv, int size )
{
    struct ivar_internals *ivi = iv.internals;

    log_3("ivar_get_nontask_with_size %p %p %d - enter\n", var, &iv, size);
    pthread_mutex_lock( &(ivi->mutex) );

    if ( !ivi->full )
        log_3("ivar_get_nontask_with_size %p %p %d -> waiting for data\n"
             , var, &iv, size);

    /* Loop guards against spurious wakeups; cond_wait re-acquires the mutex
     * before the predicate is re-tested. */
    while( !ivi->full ) {
        int err = pthread_cond_wait( &(ivi->cond), &(ivi->mutex) );
        if (err) { exit(err); }
    }

    assert(ivi->data);
    /* Fix: the copy used to happen AFTER the unlock, racing a concurrent
     * ivar_put_with_size that swaps ivi->data. Copy while still holding
     * the mutex. */
    memcpy( var, ivi->data, size );
    pthread_mutex_unlock( &(ivi->mutex) );

    log_3("ivar_get_nontask_with_size %p %p %d - leave\n", var, &iv, size);
}
void ivar_get_helper( struct ivar_internals *iv ) { log_1("ivar_get_helper %p - enter\n", iv); pthread_mutex_lock( &(iv->mutex) ); if( !iv->full ) { log_1("ivar_get_helper %p - ivar is empty\n", iv); int create = 0; pthread_mutex_lock( &(feldspar_taskpool.mutex) ); if( !feldspar_taskpool.shutdown && (feldspar_taskpool.num_threads <= feldspar_taskpool.min_threads) ) { create = 1; ++feldspar_taskpool.num_threads; log_3("ivar_get_helper %p - will create a new thread; " "active: %d, all: %d\n" , iv, feldspar_taskpool.act_threads, feldspar_taskpool.num_threads); } else { --feldspar_taskpool.act_threads; log_3("ivar_get_helper %p - will NOT create a new thread; " "active: %d, all: %d\n" , iv, feldspar_taskpool.act_threads, feldspar_taskpool.num_threads); } pthread_mutex_unlock( &(feldspar_taskpool.mutex) ); if( create ) { pthread_t th; pthread_create( &th, NULL, &worker, (void*)&feldspar_taskpool ); } log_1("ivar_get_helper %p - blocking while waiting for data\n", iv); pthread_cond_wait( &(iv->cond), &(iv->mutex) ); pthread_mutex_lock( &(feldspar_taskpool.mutex) ); ++feldspar_taskpool.act_threads; log_3("ivar_get_helper %p - data arrived; active: %d, all: %d\n" , iv, feldspar_taskpool.act_threads, feldspar_taskpool.num_threads); pthread_mutex_unlock( &(feldspar_taskpool.mutex) ); } pthread_mutex_unlock( &(iv->mutex) ); log_1("ivar_get_helper %p - leave\n", iv); }
/* Enqueue a closure for the worker threads. `closure` points at a buffer
 * laid out as [function pointer][argument bytes] (see worker()). The queue
 * is a ring buffer of `capacity` slots; head == tail means empty. */
void spawn( void *closure )
{
    log_1("spawn %p - enter\n", closure);
    pthread_mutex_lock( &(feldspar_taskpool->mutex) );

    feldspar_taskpool->closures[feldspar_taskpool->tail] = closure;
    log_3("spawn %p - saved as task %d at %p\n"
         , closure, feldspar_taskpool->tail
         , &feldspar_taskpool->closures[feldspar_taskpool->tail]);
    ++feldspar_taskpool->tail;
    if( feldspar_taskpool->tail == feldspar_taskpool->capacity )
        feldspar_taskpool->tail = 0;

    /* Fix: overflow was silent. If tail catches up with head the queue is
     * indistinguishable from empty and `capacity` pending tasks are lost.
     * Fail loudly instead of corrupting the queue. */
    assert( feldspar_taskpool->tail != feldspar_taskpool->head );

    pthread_mutex_unlock( &(feldspar_taskpool->mutex) );
    log_1("spawn %p - leave\n", closure);
}
/* Worker thread entry point (argument ignored; uses the global pool).
 * Busy-loops popping closures from the ring buffer and running them until
 * shutdown is requested and the queue drains, or the pool has more active
 * threads than max_threads. The last worker out destroys the pool mutex. */
void *worker()
{
    unsigned int self;
    /* Thread id used only for log readability (truncating cast is fine). */
    self = (unsigned long)pthread_self();
    log_1("worker %d - enter\n", self);
    struct taskpool *pool = feldspar_taskpool;
    void (*fun)();
    char *closure;
    int awake = 1;   /* suppresses repeated "sleep" log lines while idle */
    log_1("worker %d - entering the loop\n", self);
    while(1) {
        /* NOTE(review): shutdown/head/tail/act_threads are peeked here
         * WITHOUT the pool mutex — a deliberate racy fast-path, it seems;
         * the authoritative check below is under the lock. Confirm this is
         * intended (it is a data race in the C11 sense). */
        if( pool->shutdown && pool->head == pool->tail ) {
            log_1("worker %d - shutdown detected, going to terminate\n", self);
            break;
        }
        if( pool->act_threads > pool->max_threads ) {
            log_1("worker %d - too many active threads, going to terminate\n", self);
            break;
        }
        fun = NULL;
        closure = NULL;
        /* Pop one task, if any, under the lock. head == tail means empty. */
        pthread_mutex_lock( &(pool->mutex) );
        if( pool->head != pool->tail ) {
            log_2("worker %d - pop task %d\n", self, pool->head);
            closure = pool->closures[pool->head];
            ++pool->head;
            if( pool->head == pool->capacity ) pool->head = 0;
        } else { }
        pthread_mutex_unlock( &(pool->mutex) );
        if( closure == NULL ) {
            /* Queue empty: spin (no condvar); only log the first idle pass. */
            if (1 == awake) {
                log_1("worker %d - sleep\n", self);
                awake = 0;
            }
        } else {
            awake = 1;
            /* Closure layout: a function pointer followed by its argument
             * block; call the function with a pointer to the arguments. */
            fun = *((void(**)())closure);
            log_2("worker %d - closure %p enter\n", self, fun);
            fun( closure + sizeof(void(*)()) ); /* TODO: sizeof(void*) == sizeof(void(**)()) is assumed here */
            log_2("worker %d - closure %p leave\n", self, fun);
        }
    }
    /* Cleanup before exit: */
    {
        int last = 0;
        log_1("worker %d - cleanup\n", self);
        pthread_mutex_lock( &(pool->mutex) );
        --pool->num_threads;
        --pool->act_threads;
        log_3("worker %d - cleanup done; active: %d, all: %d\n"
             , self, pool->act_threads, pool->num_threads);
        last = (pool->num_threads == 0);
        pthread_mutex_unlock( &(pool->mutex) );
        if( last ) {
            /* Last thread tears down the shared mutex; safe only if no one
             * can lock it again afterwards. */
            log_1("worker %d - last one does extra cleanup\n", self);
            pthread_mutex_destroy( &(pool->mutex) );
        }
    }
    log_1("worker %d - leave\n", self);
    pthread_exit(NULL);
}