/* Get_Area_Part: compute, for each voxel of the sub-area of <dim> selected
 * by <start>..<end> (inclusive), an overlap-count weight derived from the
 * block dimensions <bdim>.
 *
 * start / end may be NULL: a NULL start means the area begins at the
 * origin, a NULL end means it runs to the end of <dim>.
 *
 * Returns a malloc'ed array of matrix_size(area) weights, or NULL on
 * allocation failure.  Caller owns the array and must free() it.
 */
dim_type* Get_Area_Part(dim_type bdim[], dim_type dim[], dim_type start[],
                        dim_type end[], ndim_type ndim)
{
  dim_type area_dim[TZ_MATRIX_MAX_DIM];
  dim_type i, j;

  /* Extent of the selected area along each dimension. */
  for (i = 0; i < ndim; i++) {
    area_dim[i] = (end == NULL) ? dim[i] : end[i] + 1;
    if (start != NULL) {
      area_dim[i] -= start[i];
    }
  }

  dim_type length = matrix_size(area_dim, ndim);
  dim_type *array = (dim_type *) malloc(sizeof(dim_type) * length);
  if (array == NULL) {  /* propagate allocation failure instead of crashing */
    return NULL;
  }

  dim_type sub[TZ_MATRIX_MAX_DIM];
  for (i = 0; i < length; i++) {
    array[i] = 1;
    ind2sub(area_dim, ndim, i, sub);
    for (j = 0; j < ndim; j++) {
      /* BUG FIX: the original dereferenced start[j] unconditionally here,
       * crashing when start == NULL — which the loop above explicitly
       * permits.  A NULL start is an offset of 0 in every dimension. */
      dim_type offset = (start != NULL) ? start[j] : 0;
      array[i] *= imin2(bdim[j] + dim[j] - 2, sub[j] + offset + bdim[j] - 1)
                  - imax2(sub[j] + offset, bdim[j] - 2) + 1;
    }
  }

  return array;
}
/* Build a log-odds score matrix for contexts of length <order> from the
 * positive and negative probability tables.
 *
 * When <averaging>, each probability is notionally divided by the order,
 * i.e. log(order) is subtracted from each log term before differencing.
 * (The two subtractions cancel algebraically but are kept so the floating
 * point result matches the established behavior.)
 *
 * Returns a newly allocated array of matrix_size(order, radix) scores,
 * or NULL if allocation fails.  Caller owns the array.
 */
real* probs_to_score_matrix( int order, int radix, real** pos_probs,
                             real** neg_probs, bool averaging )
{
    int idx;
    int size = matrix_size( order, radix );
    real* scores = new_real_array( size );
    real log_order = log( order );

    if ( scores == NULL ) {
        return NULL;
    }

    for ( idx = 0; idx < size; idx++ ) {
        real pos_log = log( pos_probs[order][idx] );
        real neg_log = log( neg_probs[order][idx] );
        if ( averaging ) {
            scores[idx] = ( pos_log - log_order ) - ( neg_log - log_order );
        } else {
            scores[idx] = pos_log - neg_log;
        }
    }

    return scores;
}
/* Accumulate context counts: for every position in <string>, bump the
 * count of each context of length 0..min(order, position) that ends at
 * that position.  counts[len][index] is the per-length count table;
 * matrix_index() returns -1 for contexts containing out-of-alphabet
 * symbols, which are skipped.
 */
void fill_in_counts( int order, int radix, int** counts, int* string,
                     int string_len )
{
    int pos, len;
    int max_index = matrix_size( order, radix );

    for ( pos = 0; pos < string_len; pos++ ) {
        /* Cannot look back past the start of the string. */
        int lookbacks = min( order, pos ) + 1;
        for ( len = 0; len < lookbacks; len++ ) {
            int index = matrix_index( len, radix, string, pos );
            if ( index == -1 ) {
                continue;  /* context contains an invalid symbol */
            }
            assert( index >= 0 && index < max_index );
            counts[ len ][ index ] += 1;
        }
    }
}
/* Reset the object-label workspace for a fresh pass: every chord entry
 * is set to -1 (unlinked) and every level (stored in ow->u, viewed as a
 * uint16 array of the same voxel count) is set to 1. */
void Init_Objlabel_Workspace_Gg(Objlabel_Workspace *ow)
{
  int nvoxel = matrix_size(ow->chord->dim, ow->chord->ndim);
  uint16 *level = (uint16 *) ow->u;
  int k;

  for (k = 0; k < nvoxel; k++) {
    ow->chord->array[k] = -1;
    level[k] = 1;
  }
}
/* Allocate a rows x cols matrix with zero-initialized data.
 *
 * Returns NULL on allocation failure.  (The original dereferenced the
 * unchecked malloc result and never checked calloc either.)  Caller owns
 * the returned matrix.
 */
matrix * create_matrix(size_t rows, size_t cols)
{
    matrix *mm = (matrix *) malloc(sizeof(matrix));
    if (mm == NULL) {
        return NULL;
    }

    mm->rows = rows;
    mm->cols = cols;

    /* calloc zero-fills and checks the count*size multiplication. */
    size_t size = matrix_size(mm);
    mm->data = (DTYPE *) calloc(size, sizeof(DTYPE));
    if (mm->data == NULL) {
        free(mm);  /* don't leak the header on OOM */
        return NULL;
    }

    return mm;
}
/* Run a gridding NFFT on MATLAB input data, dispatching on trajectory
 * dimensionality (2D or 3D).
 *
 * input_data       - complex k-space samples
 * input_trajectory - trajectory, first MATLAB dimension is 2 or 3
 * dimensions       - output matrix size (2 or 3 entries)
 * W                - gridding kernel width
 * dcw              - optional density compensation weights (may be null)
 *
 * Returns the reconstructed array converted back to an mxArray.
 */
static mxArray* gadgetronNFFT_internal(mxArray* input_data,
                                       mxArray* input_trajectory,
                                       mxArray* dimensions, float W,
                                       mxArray* dcw = nullptr) {
  auto g_input_data = MatlabToHoNDArray<float_complext>(input_data);
  auto g_dimensions = MatlabToHoNDArray<size_t>(dimensions);

  boost::shared_ptr<hoNDArray<float>> g_dcw;
  if (dcw)
    g_dcw = boost::make_shared<hoNDArray<float>>(MatlabToHoNDArray<float>(dcw));

  auto traj_dims = mxGetDimensions(input_trajectory);
  boost::shared_ptr<hoNDArray<float_complext> > output;

  if (traj_dims[0] == 2) {
    // 2D trajectory: points are (kx, ky) pairs
    auto g_traj = MatlabToHoNDArray<vector_td<float,2 >>(input_trajectory);
    vector_td<uint64_t,2> matrix_size((g_dimensions)[0],(g_dimensions)[1]);
    output = gadgetronNFFT_instance(&g_input_data,&g_traj,matrix_size,W,g_dcw.get());
  } else if (traj_dims[0] == 3) {
    // BUG FIX: was `traj_dims[1]`, which tests the sample count (nonzero
    // for any non-empty trajectory) instead of the point dimensionality;
    // the 2D/3D dispatch clearly intends to test traj_dims[0] == 3 here.
    auto g_traj = MatlabToHoNDArray<vector_td<float,3 >>(input_trajectory);
    vector_td<uint64_t,3> matrix_size((g_dimensions)[0],(g_dimensions)[1],(g_dimensions)[2]);
    output = gadgetronNFFT_instance(&g_input_data,&g_traj,matrix_size,W,g_dcw.get());
  }
  // NOTE(review): if traj_dims[0] is neither 2 nor 3, `output` stays null
  // and hoNDArrayToMatlab(nullptr) is invoked — consider reporting an
  // error via mexErrMsgTxt upstream.
  return hoNDArrayToMatlab(output.get());
}
/* Grab one batch of rows from the shared parallel-work descriptor and
 * forward-substitute them, then publish progress.  Runs concurrently on
 * worker threads; w->i (next row) and w->j (band width) are only read or
 * advanced while holding the parwork lock. */
static void parwork_next_rows(fsub_context_t* fsc)
{
  /* continue processing the parallel work */
  parwork_t* const w = &fsc->parwork;
  index_t i = INVALID_MATRIX_INDEX;
  index_t j = INVALID_MATRIX_INDEX;
  size_t row_count;

  parwork_lock_with_sync(w);

  const size_t asize = matrix_size(fsc->a);
  if (w->i < asize)
  {
    /* batch size aims at ~CONFIG_PAR_SIZE cells of the current band
       (width w->j): at least 1 row, at most the rows remaining */
    row_count = CONFIG_PAR_SIZE / w->j;
    if (row_count == 0) row_count = 1;
    else if ((w->i + row_count) > asize) row_count = asize - w->i;

    /* claim [w->i, w->i + row_count) under the lock */
    i = w->i;
    w->i += row_count;
    j = w->j;
  }

  parwork_unlock(w);

  /* did not get work (row_count is only initialized when work was taken) */
  if (i == INVALID_MATRIX_INDEX) return ;

  /* process the row */
  const size_t saved_count = row_count;  /* the loop consumes row_count */
  for (; row_count; --row_count, ++i) sub_row(fsc, i, j);

  /* update processed row_count (atomically: readers poll without the lock) */
  add_atomic_ul(&w->row_count, saved_count);
}
/* Pthread-parallel forward substitution: solve the lower-triangular system
 * a * x = b in place in b.  The diagonal is walked in kk-sized triangular
 * blocks processed sequentially, while the band to the left of the diagonal
 * is processed in parallel by worker threads via parwork_next_rows().
 * The calling thread also contributes to the parallel work while waiting. */
void fsub_pthread_apply(const matrix_t* a, vector_t* b)
{
  /* assume fsc->lsize >= matrix_size(fsc->a) */
  /* assume (fsc->lsize % fsc->ksize) == 0 */
  /* assume (matrix_size(fsc->a) % fsc->lsize) == 0 */

  fsub_context_t* const fsc = &global_fsc; /* fixme, should not rely on it */

  if (fsc->a == NULL)
  {
    /* first call: publish the problem, then release the worker pool */
    fsc->a = a;
    fsc->b = b;
    __sync_synchronize(); /* make a/b visible before waking the workers */

    /* wake threads up */
    size_t tid;
    for (tid = 0; tid < CONFIG_THREAD_COUNT; ++tid)
      fsc->pool.tbs[tid].state = THREAD_STATE_STEAL;
  }

  const size_t llcount = matrix_size(fsc->a) / fsc->lsize;

  /* solve the first llblock */
  sub_tri_block(fsc, 0, fsc->lsize);

  /* post the band to process */
  parwork_lock(&fsc->parwork);
  fsc->parwork.i = fsc->lsize;
  fsc->parwork.j = fsc->lsize;
  fsc->parwork.row_count = fsc->lsize;
  parwork_unlock(&fsc->parwork);

  /* slide along all the kkblocks on the diagonal */
  const index_t last_i = (llcount - 1) * fsc->lsize;
  index_t i;
  for (i = fsc->lsize; i < last_i; i += fsc->ksize)
  {
    const index_t next_i = i + fsc->ksize;

    /* wait until left band processed. the loop is left with the lock held
       and no one working, thus safe to process the kk block and update
       parwork. synced_row_count is updated to reflect the new synchronized
       parwork.row_count. */
    while (1)
    {
      if ((size_t)read_atomic_ul(&fsc->parwork.row_count) >= next_i)
      {
        parwork_lock(&fsc->parwork);
        parwork_synchronize(&fsc->parwork);
        break ;
      }

      /* contribute to the parallel work instead of spinning idle */
      parwork_next_rows(fsc);
    }

    /* process the kk block sequentially (lock still held) */
    sub_tri_block(fsc, i, fsc->ksize);

    /* update parwork area j (which is next_i) */
    fsc->parwork.j = next_i;

    /* updating the parwork area may have created a hole below the kkblock.
       this hole dim are: i + fsc->ksize, i, parwork.row_count, i + fsc->ksize
       capture parwork.row_count before unlocking workers */
    const index_t row_count = fsc->parwork.row_count;

    parwork_unlock(&fsc->parwork);

    /* process the band sequentially */
    const size_t band_height = row_count - next_i;
    sub_rect_block(fsc, next_i, i, band_height, fsc->ksize);
  }

  /* wait until remaining left bands processed */
  const size_t asize = matrix_size(fsc->a);
  while ((index_t)read_atomic_ul(&fsc->parwork.row_count) < asize)
    parwork_next_rows(fsc);

  /* proces last llblock sequentially */
  if (llcount > 1) sub_tri_block(fsc, i, fsc->lsize);
}
/* Size in bytes of the matrix' data buffer (element count times the
 * element size). */
size_t matrix_bytes(matrix * mm)
{
    size_t n_elems = matrix_size(mm);
    return n_elems * sizeof(DTYPE);
}
/* Test driver: exercises trilinear point sampling on a tiny 3x3x3 stack,
 * then benchmarks Stack_Points_Sampling over a 10x4x5 grid of sample
 * points and writes the resampled result out as a TIFF. */
int main(int argc, char *argv[])
{
  /* 3x3x3 grey stack whose voxel values are just their linear index,
     so interpolated samples are easy to verify by hand */
  Stack *stack = Make_Stack(GREY, 3, 3, 3);
  int i;
  for (i = 0; i < Stack_Voxel_Number(stack); i++) {
    stack->array[i] = i;
  }
  Print_Stack(stack);

  /* spot-check single-point sampling at assorted fractional coordinates */
  printf("%g\n", Stack_Point_Sampling(stack, 1.1, 1.5, 1.0));
  printf("%g\n", Stack_Point_Sampling(stack, 1.1, 1.0, 1.5));
  printf("%g\n", Stack_Point_Sampling(stack, 1.1, 1.0, 1.0));
  printf("%g\n", Stack_Point_Sampling(stack, 1.0, 1.5, 1.3));
  printf("%g\n", Stack_Point_Sampling(stack, 1.0, 1.5, 1.0));
  printf("%g\n", Stack_Point_Sampling(stack, 1.0, 1.0, 1.3));
  printf("%g\n", Stack_Point_Sampling(stack, 1.0, 1.5, 1.0));

  /* destination matrix for the batch-sampled values: 10x4x5 */
  DMatrix *dm;
  dim_type dim[3];
  dim[0] = 10;
  dim[1] = 4;
  dim[2] = 5;
  dm = Make_DMatrix(dim, 3);

  int j, k;
  int offset = 0;
  double x, y, z;

  /* points matrix: one (x,y,z) row per voxel of dm — dim[] is reused here */
  dim[0] = matrix_size(dm->dim, dm->ndim);
  dim[1] = 3;
  DMatrix *points = Make_DMatrix(dim, 2);

  /* fill points with a grid spanning the whole stack, scaled so the dm
     grid maps onto [0, extent-1] in each stack dimension */
  for (k = 0; k < dm->dim[2]; k++) {
    z = (double) (k ) * (stack->depth - 1) / (dm->dim[2] - 1);
    for (j = 0; j < dm->dim[1]; j++) {
      y = (double) (j ) * (stack->height - 1) / (dm->dim[1] - 1);
      for (i = 0; i < dm->dim[0]; i++) {
        x = (double) (i ) * (stack->width - 1) / (dm->dim[0] - 1);
        points->array[offset++] = x;
        points->array[offset++] = y;
        points->array[offset++] = z;
      }
    }
  }

  /* benchmark: 100 batch-sampling passes */
  tic();
  for (j = 0; j < 100; j++) {
    Stack_Points_Sampling(stack, points->array, points->dim[0], dm->array);
  }
  printf("%llu\n", toc());

  DMatrix_Print(dm);

  /* convert the sampled doubles back to a grey stack and save it */
  Kill_Stack(stack);
  stack = Scale_Double_Stack(dm->array, dm->dim[0], dm->dim[1], dm->dim[2], GREY);
  Write_Stack("../data/test.tif", stack);

  Kill_Stack(stack);
  Kill_DMatrix(dm);
  Kill_DMatrix(points);

  return 0;
}