/**
   Reorders the vector elements in place according to @perm: after the
   call, element i holds the value previously stored at perm[i]. The
   perm array must be a valid permutation of [0, vector->size).
*/
void @TYPE@_vector_permute(@TYPE@_vector_type * vector , const int * perm) {
  @TYPE@_vector_assert_writable( vector );
  {
    /* Permute out of an unmodified snapshot so that reads are not
       clobbered by the writes back into vector->data. */
    @TYPE@ * snapshot = util_alloc_copy( vector->data , sizeof * snapshot * vector->size );
    int index;

    for (index = 0; index < vector->size; index++)
      vector->data[index] = snapshot[ perm[index] ];

    free( snapshot );
  }
}
ecl_sum_tstep_type * ecl_sum_tstep_alloc_remap_copy( const ecl_sum_tstep_type * src , const ecl_smspec_type * new_smspec, float default_value , const int * params_map) { int params_size = ecl_smspec_get_params_size( new_smspec ); ecl_sum_tstep_type * target = util_alloc_copy(src , sizeof * src ); target->smspec = new_smspec; target->data = util_malloc( params_size * sizeof * target->data ); target->data_size = params_size; for (int i=0; i < params_size; i++) { if (params_map[i] >= 0) target->data[i] = src->data[ params_map[i] ]; else target->data[i] = default_value; } return target; }
static node_data_type * node_data_copyc(const node_data_type * src , bool deep_copy) { node_data_type * next; if (src->buffer_size > 0) { /* The source node has internal storage - it has been allocated with _alloc_buffer() */ if (deep_copy) next = node_data_alloc__(util_alloc_copy( src->data , src->buffer_size ) /* A next copy is allocated prior to insert. */ , src->ctype , src->buffer_size , NULL , free); else next = node_data_alloc__(src->data , src->ctype , src->buffer_size , NULL , NULL); /* The copy does not have destructor. */ } else { if (deep_copy) { if (src->copyc == NULL) util_abort("%s: Tried allocate deep_copy of mnode with no constructor - aborting. \n",__func__); next = node_data_alloc__(src->data , src->ctype , 0 , src->copyc , src->del); } else next = node_data_alloc__(src->data , src->ctype , 0 , NULL , NULL); /*shallow copy - we 'hide' constructor and destructor. */ } return next; }
/** Returns a newly allocated byte-for-byte copy of @p; caller owns it. */
point_type * point_copyc( const point_type * p) {
  return util_alloc_copy( p , sizeof * p );
}
/**
   Allocates a buffer-backed node. The @data is copied on insert: the
   node gets a private heap copy of @buffer_size bytes and owns it
   (destroyed with free()).
*/
node_data_type * node_data_alloc_buffer(const void * data, int buffer_size) {
  void * owned_buffer = util_alloc_copy( data , buffer_size );
  return node_data_alloc__( owned_buffer , CTYPE_VOID_POINTER , buffer_size , NULL , free );
}
/** Allocates a node holding @value by copy; the node owns (and frees) the heap storage. */
node_data_type * node_data_alloc_double(double value) {
  double * stored = util_alloc_copy( &value , sizeof value );
  return node_data_alloc__( stored , CTYPE_DOUBLE_VALUE , sizeof value , NULL , free );
}
/** Allocates a node holding @value by copy; the node owns (and frees) the heap storage. */
node_data_type * node_data_alloc_int(int value) {
  int * stored = util_alloc_copy( &value , sizeof value );
  return node_data_alloc__( stored , CTYPE_INT_VALUE , sizeof value , NULL , free );
}
/**
   Returns a copy of the initialized part of the buffer content, i.e.
   the first buffer->content_size bytes. The caller takes ownership of
   the returned storage and must free() it.
*/
void * buffer_alloc_data_copy(const buffer_type * buffer) {
  void * data_copy = util_alloc_copy( buffer->data , buffer->content_size );
  return data_copy;
}
/**
   Allocates a deep copy of @src: the struct is duplicated and the copy
   is given its own copy of the data vector, so nothing is shared with
   @src. The caller owns the returned tstep.
*/
ecl_sum_tstep_type * ecl_sum_tstep_alloc_copy( const ecl_sum_tstep_type * src ) {
  /* Shallow struct copy first ... */
  ecl_sum_tstep_type * copy = util_alloc_copy( src , sizeof * src );
  /* ... then detach the data pointer by duplicating the vector. */
  copy->data = util_alloc_copy( src->data , src->data_size * sizeof * src->data );
  return copy;
}
/**
   The dispatcher thread of the pool. Repeatedly scans the job slots
   for a free one and starts queued jobs in it with pthread_create();
   sleeps briefly when no slot or no job is available. Exits when the
   owner has signaled join (tp->join == true) AND the whole queue has
   been dispatched, then joins the currently recorded slot threads and
   returns NULL.

   NOTE(review): ownership of the tp_arg copy passed to each job
   presumably transfers to thread_pool_start_job(), which must free it
   — confirm in that function.
*/
static void * thread_pool_main_loop( void * arg ) {
  thread_pool_type * tp = (thread_pool_type *) arg;
  {
    const int usleep_busy = 1000;  /* The sleep time when all job slots are occupied. */
    const int usleep_init = 1000;  /* The sleep time when there are free slots available - but no jobs wanting to run. */
    int internal_offset   = 0;     /* Keep track of the (index of) the last job slot fired off - minor time saving. */
    while (true) {
      if (tp->queue_size > tp->queue_index) {
        /* There are jobs in the queue which would like to run -
           let us see if we can find a slot for them. */
        int counter     = 0;
        bool slot_found = false;
        do {
          /* Start scanning just past the slot used last time (internal_offset). */
          int slot_index = (counter + internal_offset) % tp->max_running;
          thread_pool_job_slot_type * job_slot = &tp->job_slots[ slot_index ];
          if (!job_slot->running) {
            /* OK thread[slot_index] is ready to take this job. */
            thread_pool_arg_type * tp_arg;

            /* The queue might be updated by the main thread - we must
               take a private copy of the node we are interested in
               while holding the read lock. */
            pthread_rwlock_rdlock( &tp->queue_lock );
            tp_arg = util_alloc_copy( &tp->queue[ tp->queue_index ] , sizeof * tp_arg );
            pthread_rwlock_unlock( &tp->queue_lock );

            tp_arg->slot_index = slot_index;
            /* Mark the slot busy BEFORE the thread is created, so a
               rescan cannot hand the same slot to another job. */
            job_slot->running = true;
            /* Here is the actual pthread_create() call creating an
               additional running thread. */
            pthread_create( &job_slot->thread , NULL , thread_pool_start_job , tp_arg );
            job_slot->run_count += 1;
            tp->queue_index++;
            internal_offset += (counter + 1);
            slot_found = true;
          } else
            counter++;
        } while (!slot_found && (counter < tp->max_running));

        if (!slot_found)
          util_usleep( usleep_busy );  /* There are no available job slots. */
      } else
        util_usleep( usleep_init );    /* There are no jobs wanting to run. */

      /*****************************************************************/
      /*
         We exit explicitly from this loop when both conditions apply:

           1. tp->join == true                   : The calling scope has signaled
                                                   that it will not submit more jobs.
           2. tp->queue_size == tp->queue_index  : This function has submitted all
                                                   the jobs in the queue.
      */
      if ((tp->join) && (tp->queue_size == tp->queue_index))
        break;
    } /* End of while() loop */
  }

  /*
     There are no more jobs in the queue, and the main scope has
     signaled that join should start. Observe that we join only the
     jobs corresponding to explicitly running job_slots; when a job
     slot is used multiple times the first jobs run in the job_slot
     will not be explicitly joined.
  */
  {
    int i;
    for (i=0; i < tp->max_running; i++) {
      thread_pool_job_slot_type job_slot = tp->job_slots[i];
      if (job_slot.run_count > 0)
        pthread_join( job_slot.thread , NULL );
    }
  }
  /* When we are here all the jobs have completed. */
  return NULL;
}