void bli_trmm_lu_blk_var4( obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, trmm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t bm_alg;
    dim_t mT_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );
    bli_obj_init_pack( &c1_pack );

    // Query dimension in partitioning direction. Use the diagonal offset
    // to stop short of the zero region.
    mT_trans = bli_abs( bli_obj_diag_offset_after_trans( *a ) ) +
               bli_obj_width_after_trans( *a );

    // Scale C by beta (if instructed).
    bli_scalm_int( beta, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Fuse the first iteration with incremental packing and computation.
    {
        obj_t b_inc, b_pack_inc;
        obj_t c1_pack_inc;

        dim_t j;
        dim_t bn_inc;
        dim_t n_trans;

        // Query dimension in partitioning direction.
        n_trans = bli_obj_width( b_pack );

        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_f( 0, mT_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, 0, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, 0, bm_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 and scale by beta (if instructed).
        bli_packm_int( beta, &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Partition along the n dimension.
        for ( j = 0; j < n_trans; j += bn_inc )
        {
            // Determine the current incremental packing blocksize.
            bn_inc = bli_determine_blocksize_f( j, n_trans, b, cntl_blocksize_aux( cntl ) );

            // Acquire partitions.
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, b, &b_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &b_pack, &b_pack_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &c1_pack, &c1_pack_inc );

            // Pack B1 and scale by alpha (if instructed).
            bli_packm_int( alpha, &b_inc, &b_pack_inc, cntl_sub_packm_b( cntl ) );

            // Perform trmm subproblem.
            bli_trmm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack_inc, beta, &c1_pack_inc, cntl_sub_trmm( cntl ) );
        }

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // Partition along the remaining portion of the m dimension.
    for ( i = bm_alg; i < mT_trans; i += bm_alg )
    {
        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_f( i, mT_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 and scale by beta (if instructed).
        bli_packm_int( beta, &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform trmm subproblem.
        if ( bli_obj_intersects_diag( a1_pack ) )
            bli_trmm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack, beta, &c1_pack, cntl_sub_trmm( cntl ) );
        else
            bli_gemm_int( alpha, &a1_pack, &b_pack, &BLIS_ONE, &c1_pack, cntl_sub_gemm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
    bli_obj_release_pack( &c1_pack );
}
void bli_herk_blk_var2f( obj_t* a, obj_t* ah, obj_t* c, gemm_t* cntl, herk_thrinfo_t* thread )
{
    obj_t a_pack_s;
    obj_t ah1_pack_s, c1S_pack_s;

    obj_t ah1, c1, c1S;
    obj_t aS_pack;

    obj_t* a_pack;
    obj_t* ah1_pack;
    obj_t* c1S_pack;

    dim_t i;
    dim_t b_alg;
    dim_t n_trans;

    subpart_t stored_part;

    // The upper and lower variants are identical, except for which
    // merged subpartition is acquired in the loop body.
    if ( bli_obj_is_lower( *c ) ) stored_part = BLIS_SUBPART1B;
    else                          stored_part = BLIS_SUBPART1T;

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing A.
        bli_obj_init_pack( &a_pack_s );
        bli_packm_init( a, &a_pack_s, cntl_sub_packm_a( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    a_pack = thread_obroadcast( thread, &a_pack_s );

    // Initialize pack objects for C and A' that are passed into packm_init().
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &ah1_pack_s );
        bli_obj_init_pack( &c1S_pack_s );
    }
    ah1_pack = thread_ibroadcast( thread, &ah1_pack_s );
    c1S_pack = thread_ibroadcast( thread, &c1S_pack_s );

    // Pack A (if instructed).
    bli_packm_int( a, a_pack, cntl_sub_packm_a( cntl ), herk_thread_sub_opackm( thread ) );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( *c );

    dim_t start, end;

    // Use a weighted range since only the stored (triangular) region of C
    // is updated.
    bli_get_range_weighted( thread, 0, n_trans,
                            bli_blksz_get_mult_for_obj( a, cntl_blocksize( cntl ) ),
                            bli_obj_is_lower( *c ), &start, &end );

    // Partition along the n dimension.
    for ( i = start; i < end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, end, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1' and C1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, ah, &ah1 );
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Partition off the stored region of C1 and the corresponding region
        // of A_pack.
        bli_acquire_mpart_t2b( stored_part, i, b_alg, &c1, &c1S );
        bli_acquire_mpart_t2b( stored_part, i, b_alg, a_pack, &aS_pack );

        // Initialize objects for packing A1' and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &ah1, ah1_pack, cntl_sub_packm_b( cntl ) );
            bli_packm_init( &c1S, c1S_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1' (if instructed).
        bli_packm_int( &ah1, ah1_pack, cntl_sub_packm_b( cntl ), herk_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1S, c1S_pack, cntl_sub_packm_c( cntl ), herk_thread_sub_ipackm( thread ) );

        // Perform herk subproblem.
        bli_herk_int( &BLIS_ONE, &aS_pack, ah1_pack, &BLIS_ONE, c1S_pack,
                      cntl_sub_gemm( cntl ), herk_thread_sub_herk( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1S_pack, &c1S, cntl_sub_unpackm_c( cntl ), herk_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( a_pack, cntl_sub_packm_a( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        bli_packm_release( ah1_pack, cntl_sub_packm_b( cntl ) );
        bli_packm_release( c1S_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_herk_blk_var1f( obj_t* a, obj_t* ah, obj_t* c, gemm_t* cntl, herk_thrinfo_t* thread )
{
    obj_t ah_pack_s;
    obj_t a1_pack_s, c1_pack_s;

    obj_t a1, c1;

    obj_t* a1_pack;
    obj_t* c1_pack;
    obj_t* ah_pack;

    dim_t i;
    dim_t b_alg;

    // Prune any zero region that exists along the partitioning dimension.
    bli_herk_prune_unref_mparts_m( a, ah, c );

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing A'.
        bli_obj_init_pack( &ah_pack_s );
        bli_packm_init( ah, &ah_pack_s, cntl_sub_packm_b( cntl ) );

        // Scale C by beta (if instructed).
        // Since scalm doesn't support multithreading yet, this must be done
        // by the chief thread.
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    ah_pack = thread_obroadcast( thread, &ah_pack_s );

    // Initialize pack objects that are passed into packm_init() for A and C.
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack A' (if instructed).
    bli_packm_int( ah, ah_pack, cntl_sub_packm_b( cntl ), herk_thread_sub_opackm( thread ) );

    dim_t my_start, my_end;
    bli_get_range_weighted_t2b( thread, c,
                                bli_blksz_get_mult_for_obj( a, cntl_blocksize( cntl ) ),
                                &my_start, &my_end );

    // Partition along the m dimension.
    for ( i = my_start; i < my_end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, my_end, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, cntl_sub_packm_a( cntl ) );
            bli_packm_init( &c1, c1_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, cntl_sub_packm_a( cntl ), herk_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntl_sub_packm_c( cntl ), herk_thread_sub_ipackm( thread ) );

        // Perform herk subproblem.
        bli_herk_int( &BLIS_ONE, a1_pack, ah_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), herk_thread_sub_herk( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ), herk_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( ah_pack, cntl_sub_packm_b( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_hemv_blk_var1( conj_t conjh, obj_t* alpha, obj_t* a, obj_t* x, obj_t* beta, obj_t* y, cntx_t* cntx, hemv_t* cntl )
{
    obj_t a11, a11_pack;
    obj_t a10;
    obj_t x1, x1_pack;
    obj_t x0;
    obj_t y1, y1_pack;
    obj_t y0;

    dim_t mn;
    dim_t ij;
    dim_t b_alg;

    // Even though this blocked algorithm is expressed only in terms of the
    // lower triangular case, the upper triangular case is still supported:
    // when bli_acquire_mpart_tl2br() is passed a matrix that is stored in
    // the upper triangle, and the requested subpartition resides in the
    // lower triangle (as is the case for this algorithm), the routine fills
    // the request as if the caller had actually requested the corresponding
    // "mirror" subpartition in the upper triangle, except that it marks the
    // subpartition for transposition (and conjugation).

    // Initialize objects for packing.
    bli_obj_init_pack( &a11_pack );
    bli_obj_init_pack( &x1_pack );
    bli_obj_init_pack( &y1_pack );

    // Query dimension.
    mn = bli_obj_length( a );

    // y = beta * y;
    bli_scalv_int( beta, y, cntx, bli_cntl_sub_scalv( cntl ) );

    // Partition diagonally.
    for ( ij = 0; ij < mn; ij += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( ij, mn, a, bli_cntl_bszid( cntl ), cntx );

        // Acquire partitions for A11, A10, x1, x0, y1, and y0.
        bli_acquire_mpart_tl2br( BLIS_SUBPART11, ij, b_alg, a, &a11 );
        bli_acquire_mpart_tl2br( BLIS_SUBPART10, ij, b_alg, a, &a10 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, ij, b_alg, x, &x1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART0, ij, b_alg, x, &x0 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, ij, b_alg, y, &y1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART0, ij, b_alg, y, &y0 );

        // Initialize objects for packing A11, x1, and y1 (if needed).
        bli_packm_init( &a11, &a11_pack, cntx, bli_cntl_sub_packm_a11( cntl ) );
        bli_packv_init( &x1, &x1_pack, cntx, bli_cntl_sub_packv_x1( cntl ) );
        bli_packv_init( &y1, &y1_pack, cntx, bli_cntl_sub_packv_y1( cntl ) );

        // Copy/pack A11, x1, y1 (if needed).
        bli_packm_int( &a11, &a11_pack, cntx, bli_cntl_sub_packm_a11( cntl ), &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &x1, &x1_pack, cntx, bli_cntl_sub_packv_x1( cntl ) );
        bli_packv_int( &y1, &y1_pack, cntx, bli_cntl_sub_packv_y1( cntl ) );

        // y0 = y0 + alpha * A10' * x1;
        bli_gemv_int( bli_apply_conj( conjh, BLIS_TRANSPOSE ), BLIS_NO_CONJUGATE,
                      alpha, &a10, &x1_pack, &BLIS_ONE, &y0,
                      cntx, bli_cntl_sub_gemv_t_rp( cntl ) );

        // y1 = y1 + alpha * A11 * x1;
        bli_hemv_int( conjh, alpha, &a11_pack, &x1_pack, &BLIS_ONE, &y1_pack,
                      cntx, bli_cntl_sub_hemv( cntl ) );

        // y1 = y1 + alpha * A10 * x0;
        bli_gemv_int( BLIS_NO_TRANSPOSE, BLIS_NO_CONJUGATE,
                      alpha, &a10, &x0, &BLIS_ONE, &y1_pack,
                      cntx, bli_cntl_sub_gemv_n_rp( cntl ) );

        // Copy/unpack y1 (if y1 was packed).
        bli_unpackv_int( &y1_pack, &y1, cntx, bli_cntl_sub_unpackv_y1( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_packm_release( &a11_pack, bli_cntl_sub_packm_a11( cntl ) );
    bli_packv_release( &x1_pack, bli_cntl_sub_packv_x1( cntl ) );
    bli_packv_release( &y1_pack, bli_cntl_sub_packv_y1( cntl ) );
}
void bli_gemv_blk_var2( obj_t* alpha, obj_t* a, obj_t* x, obj_t* beta, obj_t* y, cntx_t* cntx, gemv_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t x1, x1_pack;

    dim_t n_trans;
    dim_t i;
    dim_t b_alg;

    // Initialize objects for packing.
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &x1_pack );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( a );

    // y = beta * y;
    bli_scalv_int( beta, y, cntx, bli_cntl_sub_scalv( cntl ) );

    // Partition along the "k" dimension (n dimension of A).
    for ( i = 0; i < n_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, n_trans, a, bli_cntl_bszid( cntl ), cntx );

        // Acquire partitions for A1 and x1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, i, b_alg, x, &x1 );

        // Initialize objects for packing A1 and x1 (if needed).
        bli_packm_init( &a1, &a1_pack, cntx, bli_cntl_sub_packm_a( cntl ) );
        bli_packv_init( &x1, &x1_pack, cntx, bli_cntl_sub_packv_x( cntl ) );

        // Copy/pack A1, x1 (if needed).
        bli_packm_int( &a1, &a1_pack, cntx, bli_cntl_sub_packm_a( cntl ), &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &x1, &x1_pack, cntx, bli_cntl_sub_packv_x( cntl ) );

        // y = y + alpha * A1 * x1;
        bli_gemv_int( BLIS_NO_TRANSPOSE, BLIS_NO_CONJUGATE,
                      alpha, &a1_pack, &x1_pack, &BLIS_ONE, y,
                      cntx, bli_cntl_sub_gemv( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_packm_release( &a1_pack, bli_cntl_sub_packm_a( cntl ) );
    bli_packv_release( &x1_pack, bli_cntl_sub_packv_x( cntl ) );
}
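/*
   Usage sketch (illustrative only, not part of this variant): blocked gemv
   variants like the one above are normally reached through BLIS' object API
   front end rather than called directly. The following is a minimal sketch,
   assuming the standard object API entry points (bli_init, bli_obj_create,
   bli_randm, bli_randv, bli_gemv, bli_obj_free, bli_finalize) and the global
   scalar objects BLIS_ONE and BLIS_ZERO; exact signatures may differ across
   BLIS versions.

   #include "blis.h"

   int main( void )
   {
       obj_t a, x, y;
       dim_t m = 1000, n = 800;

       bli_init();

       // Create an m x n matrix and conformal vectors (vectors are m x 1
       // objects); passing 0 for the strides lets BLIS choose the storage.
       bli_obj_create( BLIS_DOUBLE, m, n, 0, 0, &a );
       bli_obj_create( BLIS_DOUBLE, n, 1, 0, 0, &x );
       bli_obj_create( BLIS_DOUBLE, m, 1, 0, 0, &y );
       bli_randm( &a );
       bli_randv( &x );
       bli_randv( &y );

       // y := 1.0 * A * x + 0.0 * y, dispatched through blocked variants
       // such as those in this file.
       bli_gemv( &BLIS_ONE, &a, &x, &BLIS_ZERO, &y );

       bli_obj_free( &a );
       bli_obj_free( &x );
       bli_obj_free( &y );
       bli_finalize();

       return 0;
   }
*/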
void bli_gemm_blk_var1f( obj_t* a, obj_t* b, obj_t* c, cntx_t* cntx, gemm_t* cntl, gemm_thrinfo_t* thread )
{
    // The _s suffix denotes objects that live on the stack.
    obj_t b_pack_s;
    obj_t a1_pack_s, c1_pack_s;

    obj_t a1, c1;
    obj_t* a1_pack = NULL;
    obj_t* b_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing B.
        bli_obj_init_pack( &b_pack_s );
        bli_packm_init( b, &b_pack_s, cntx, cntl_sub_packm_b( cntl ) );

        // Scale C by beta (if instructed).
        // Since scalm doesn't support multithreading yet, this must be done
        // by the chief thread.
        bli_scalm_int( &BLIS_ONE, c, cntx, cntl_sub_scalm( cntl ) );
    }
    b_pack = thread_obroadcast( thread, &b_pack_s );

    // Initialize objects passed into bli_packm_init() for A and C.
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack B (if instructed).
    bli_packm_int( b, b_pack, cntx, cntl_sub_packm_b( cntl ), gemm_thread_sub_opackm( thread ) );

    dim_t my_start, my_end;
    bli_get_range_t2b( thread, a,
                       bli_cntx_get_bmult( cntl_bszid( cntl ), cntx ),
                       &my_start, &my_end );

    // Partition along the m dimension.
    for ( i = my_start; i < my_end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        // NOTE: Use of a (for execution datatype) is intentional!
        // This causes the right blocksize to be used if c and a are
        // complex and b is real.
        b_alg = bli_determine_blocksize_f( i, my_end, a, cntl_bszid( cntl ), cntx );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, cntx, cntl_sub_packm_a( cntl ) );
            bli_packm_init( &c1, c1_pack, cntx, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, cntx, cntl_sub_packm_a( cntl ), gemm_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntx, cntl_sub_packm_c( cntl ), gemm_thread_sub_ipackm( thread ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, c1_pack,
                      cntx, cntl_sub_gemm( cntl ), gemm_thread_sub_gemm( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed). Currently this must be done by a
        // single thread.
        bli_unpackm_int( c1_pack, &c1, cntx, cntl_sub_unpackm_c( cntl ), gemm_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( b_pack, cntl_sub_packm_b( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_trmm_blk_var2f( obj_t* a, obj_t* b, obj_t* c, trmm_t* cntl )
{
    obj_t a_pack;
    obj_t b1, b1_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t b_alg;
    dim_t n_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a_pack );
    bli_obj_init_pack( &b1_pack );
    bli_obj_init_pack( &c1_pack );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( *b );

    // Scale C by beta (if instructed).
    bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing A.
    bli_packm_init( a, &a_pack, cntl_sub_packm_a( cntl ) );

    // Pack A (if instructed).
    bli_packm_int( a, &a_pack, cntl_sub_packm_a( cntl ) );

    // Partition along the n dimension.
    for ( i = 0; i < n_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, n_trans, b, cntl_blocksize( cntl ) );

        // Acquire partitions for B1 and C1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, b, &b1 );
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing B1 and C1.
        bli_packm_init( &b1, &b1_pack, cntl_sub_packm_b( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack B1 (if instructed).
        bli_packm_int( &b1, &b1_pack, cntl_sub_packm_b( cntl ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform trmm subproblem.
        bli_trmm_int( &BLIS_ONE, &a_pack, &b1_pack, &BLIS_ONE, &c1_pack, cntl_sub_trmm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a_pack );
    bli_obj_release_pack( &b1_pack );
    bli_obj_release_pack( &c1_pack );
}
void bli_gemm_blk_var4( obj_t* a, obj_t* b, obj_t* c, gemm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t bm_alg;
    dim_t m_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );
    bli_obj_init_pack( &c1_pack );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *a );

    // Scale C by beta (if instructed).
    bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Fuse the first iteration with incremental packing and computation.
    {
        obj_t b_inc, b_pack_inc;
        obj_t c1_pack_inc;

        dim_t j;
        dim_t bn_inc;
        dim_t n_trans;

        // Query dimension in partitioning direction.
        n_trans = bli_obj_width( b_pack );

        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_f( 0, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, 0, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, 0, bm_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Partition along the n dimension.
        for ( j = 0; j < n_trans; j += bn_inc )
        {
            // Determine the current incremental packing blocksize.
            bn_inc = bli_determine_blocksize_f( j, n_trans, b, cntl_blocksize_aux( cntl ) );

            // Acquire partitions.
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, b, &b_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &b_pack, &b_pack_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &c1_pack, &c1_pack_inc );

            // Pack B1 (if instructed).
            bli_packm_int( &b_inc, &b_pack_inc, cntl_sub_packm_b( cntl ) );

            // Perform gemm subproblem.
            bli_gemm_int( &BLIS_ONE, &a1_pack, &b_pack_inc, &BLIS_ONE, &c1_pack_inc, cntl_sub_gemm( cntl ) );
        }

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // Partition along the remaining portion of the m dimension.
    for ( i = bm_alg; i < m_trans; i += bm_alg )
    {
        // Determine the current algorithmic blocksize.
        // NOTE: Use of a (for execution datatype) is intentional!
        // This causes the right blocksize to be used if c and a are
        // complex and b is real.
        bm_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, &a1_pack, &b_pack, &BLIS_ONE, &c1_pack, cntl_sub_gemm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
    bli_obj_release_pack( &c1_pack );
}
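/*
   Usage sketch (illustrative only, not part of this variant): blocked gemm
   variants like the one above are reached through the object API front end,
   which builds the control tree that these functions consume. A minimal
   invocation, assuming the standard entry points (bli_init, bli_obj_create,
   bli_randm, bli_gemm, bli_obj_free, bli_finalize) and the global scalar
   objects BLIS_ONE, might look like the following; signatures may vary
   across BLIS versions.

   #include "blis.h"

   int main( void )
   {
       obj_t a, b, c;
       dim_t m = 1000, n = 1000, k = 500;

       bli_init();

       // Passing 0 for the row/column strides lets BLIS choose the default
       // (typically column-major) storage.
       bli_obj_create( BLIS_DOUBLE, m, k, 0, 0, &a );
       bli_obj_create( BLIS_DOUBLE, k, n, 0, 0, &b );
       bli_obj_create( BLIS_DOUBLE, m, n, 0, 0, &c );
       bli_randm( &a );
       bli_randm( &b );
       bli_randm( &c );

       // C := 1.0 * A * B + 1.0 * C; the front end partitions the problem
       // and eventually dispatches to blocked variants such as these.
       bli_gemm( &BLIS_ONE, &a, &b, &BLIS_ONE, &c );

       bli_obj_free( &a );
       bli_obj_free( &b );
       bli_obj_free( &c );
       bli_finalize();

       return 0;
   }
*/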
void bli_gemm_blk_var2( obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, gemm_t* cntl )
{
    obj_t a_pack_s;
    obj_t b1_pack_s;
    obj_t c1_pack_s;

    obj_t b1, c1;
    obj_t* a_pack = NULL;
    obj_t* b1_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;
    dim_t n_trans;

    dim_t num_groups = bli_gemm_num_thread_groups( cntl->thread_info );
    dim_t group_id   = bli_gemm_group_id( cntl->thread_info );

    if ( bli_gemm_am_a_master( cntl->thread_info ) )
    {
        // Initialize object for packing A.
        bli_obj_init_pack( &a_pack_s );
        bli_packm_init( a, &a_pack_s, cntl_sub_packm_a( cntl ) );
    }
    a_pack = bli_gemm_broadcast_a( cntl->thread_info, &a_pack_s );

    // Pack A and scale by alpha (if instructed).
    bli_packm_int( alpha, a, a_pack, cntl_sub_packm_a( cntl ) );
    bli_gemm_a_barrier( cntl->thread_info );

    if ( bli_gemm_am_b_master( cntl->thread_info ) )
    {
        bli_obj_init_pack( &b1_pack_s );
    }
    b1_pack = bli_gemm_broadcast_b( cntl->thread_info, &b1_pack_s );

    if ( bli_gemm_am_c_master( cntl->thread_info ) )
    {
        bli_obj_init_pack( &c1_pack_s );

        // Scale C by beta (if instructed).
        bli_scalm_int( beta, c, cntl_sub_scalm( cntl ) );
    }
    c1_pack = bli_gemm_broadcast_c( cntl->thread_info, &c1_pack_s );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( *b );

    dim_t n_pt = n_trans / num_groups;
    n_pt = ( n_pt * num_groups < n_trans ) ? n_pt + 1 : n_pt;
    n_pt = ( n_pt % 8 == 0 ) ? n_pt : n_pt + 8 - ( n_pt % 8 );
    dim_t start = group_id * n_pt;
    dim_t end   = bli_min( start + n_pt, n_trans );

    // Partition along the n dimension.
    for ( i = start; i < end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        // NOTE: Use of b (for execution datatype) is intentional!
        // This causes the right blocksize to be used if c and a are
        // complex and b is real.
        b_alg = bli_determine_blocksize_f( i, end, b, cntl_blocksize( cntl ) );

        // Acquire partitions for C1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, c, &c1 );
        // Acquire partitions for B1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, b, &b1 );

        if ( bli_gemm_am_b_master( cntl->thread_info ) )
        {
            // Initialize objects for packing B1.
            bli_packm_init( &b1, &b1_pack_s, cntl_sub_packm_b( cntl ) );
        }
        if ( bli_gemm_am_c_master( cntl->thread_info ) )
        {
            // Initialize objects for packing C1.
            bli_packm_init( &c1, &c1_pack_s, cntl_sub_packm_c( cntl ) );
        }
        bli_gemm_b_barrier( cntl->thread_info );
        bli_gemm_c_barrier( cntl->thread_info );

        // Pack B1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &b1, b1_pack, cntl_sub_packm_b( cntl ) );

        // Pack C1 and scale by beta (if instructed).
        bli_packm_int( beta, &c1, c1_pack, cntl_sub_packm_c( cntl ) );

        // Packing must be done before computation.
        bli_gemm_b_barrier( cntl->thread_info );
        bli_gemm_c_barrier( cntl->thread_info );

        // Perform gemm subproblem.
        bli_gemm_int( alpha, a_pack, b1_pack, beta, c1_pack, cntl_sub_gemm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_gemm_a_barrier( cntl->thread_info );
    if ( bli_gemm_am_a_master( cntl->thread_info ) )
        bli_obj_release_pack( &a_pack_s );
    bli_gemm_b_barrier( cntl->thread_info );
    if ( bli_gemm_am_b_master( cntl->thread_info ) )
    {
        bli_obj_release_pack( &b1_pack_s );
    }
    bli_gemm_c_barrier( cntl->thread_info );
    if ( bli_gemm_am_c_master( cntl->thread_info ) )
    {
        bli_obj_release_pack( &c1_pack_s );
    }
}
void bli_trsm_blk_var1f( obj_t* a, obj_t* b, obj_t* c, trsm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1;

    dim_t i;
    dim_t b_alg;
    dim_t m_trans;
    dim_t offA;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );

    // Set the default length of and offset to the non-zero part of A.
    m_trans = bli_obj_length_after_trans( *a );
    offA    = 0;

    // If A is lower triangular, we have to adjust where the non-zero part of
    // A begins.
    if ( bli_obj_is_lower( *a ) )
        offA = bli_abs( bli_obj_diag_offset_after_trans( *a ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Pack B (if instructed).
    bli_packm_int( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Partition along the remaining portion of the m dimension.
    for ( i = offA; i < m_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize object for packing A1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Perform trsm subproblem.
        bli_trsm_int( &BLIS_ONE, &a1_pack, &b_pack, &BLIS_ONE, &c1, cntl_sub_trsm( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
}
void bli_her2_blk_var3( conj_t conjh, obj_t* alpha, obj_t* alpha_conj, obj_t* x, obj_t* y, obj_t* c, her2_t* cntl )
{
    obj_t c11, c11_pack;
    obj_t c10;
    obj_t c21;
    obj_t x1, x1_pack;
    obj_t y1, y1_pack;
    obj_t y0;
    obj_t y2;

    dim_t mn;
    dim_t ij;
    dim_t b_alg;

    // Even though this blocked algorithm is expressed only in terms of the
    // lower triangular case, the upper triangular case is still supported:
    // when bli_acquire_mpart_tl2br() is passed a matrix that is stored in
    // the upper triangle, and the requested subpartition resides in the
    // lower triangle (as is the case for this algorithm), the routine fills
    // the request as if the caller had actually requested the corresponding
    // "mirror" subpartition in the upper triangle, except that it marks the
    // subpartition for transposition (and conjugation).

    // Initialize objects for packing.
    bli_obj_init_pack( &c11_pack );
    bli_obj_init_pack( &x1_pack );
    bli_obj_init_pack( &y1_pack );

    // Query dimension.
    mn = bli_obj_length( *c );

    // Partition diagonally.
    for ( ij = 0; ij < mn; ij += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( ij, mn, c, cntl_blocksize( cntl ) );

        // Acquire partitions for C11, C10, C21, x1, y1, y0, and y2.
        bli_acquire_mpart_tl2br( BLIS_SUBPART11, ij, b_alg, c, &c11 );
        bli_acquire_mpart_tl2br( BLIS_SUBPART10, ij, b_alg, c, &c10 );
        bli_acquire_mpart_tl2br( BLIS_SUBPART21, ij, b_alg, c, &c21 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, ij, b_alg, x, &x1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, ij, b_alg, y, &y1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART0, ij, b_alg, y, &y0 );
        bli_acquire_vpart_f2b( BLIS_SUBPART2, ij, b_alg, y, &y2 );

        // Initialize objects for packing C11, x1, and y1 (if needed).
        bli_packm_init( &c11, &c11_pack, cntl_sub_packm_c11( cntl ) );
        bli_packv_init( &x1, &x1_pack, cntl_sub_packv_x1( cntl ) );
        bli_packv_init( &y1, &y1_pack, cntl_sub_packv_y1( cntl ) );

        // Copy/pack C11, x1, y1 (if needed).
        bli_packm_int( &c11, &c11_pack, cntl_sub_packm_c11( cntl ), &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &x1, &x1_pack, cntl_sub_packv_x1( cntl ) );
        bli_packv_int( &y1, &y1_pack, cntl_sub_packv_y1( cntl ) );

        // C10 = C10 + alpha * x1 * y0';
        bli_ger_int( BLIS_NO_CONJUGATE, conjh, alpha, &x1_pack, &y0, &c10, cntl_sub_ger_rp( cntl ) );

        // C21 = C21 + conj(alpha) * y2 * x1';
        bli_ger_int( BLIS_NO_CONJUGATE, conjh, alpha_conj, &y2, &x1_pack, &c21, cntl_sub_ger_cp( cntl ) );

        // C11 = C11 + alpha * x1 * y1' + conj(alpha) * y1 * x1';
        bli_her2_int( conjh, alpha, alpha_conj, &x1_pack, &y1_pack, &c11_pack, cntl_sub_her2( cntl ) );

        // Copy/unpack C11 (if C11 was packed).
        bli_unpackm_int( &c11_pack, &c11, cntl_sub_unpackm_c11( cntl ), &BLIS_PACKM_SINGLE_THREADED );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &c11_pack );
    bli_obj_release_pack( &x1_pack );
    bli_obj_release_pack( &y1_pack );
}
void bli_trsm_blk_var2f( obj_t* a, obj_t* b, obj_t* c, trsm_t* cntl, trsm_thrinfo_t* thread )
{
    obj_t a_pack_s;
    obj_t b1_pack_s, c1_pack_s;

    obj_t b1, c1;
    obj_t* a_pack = NULL;
    obj_t* b1_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;
    dim_t n_trans;

    // Initialize pack objects for A that are passed into packm_init().
    if ( thread_am_ochief( thread ) )
    {
        bli_obj_init_pack( &a_pack_s );

        // Initialize object for packing A.
        bli_packm_init( a, &a_pack_s, cntl_sub_packm_a( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    a_pack = thread_obroadcast( thread, &a_pack_s );

    // Initialize pack objects for B and C that are passed into packm_init().
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &b1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    b1_pack = thread_ibroadcast( thread, &b1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack A (if instructed).
    bli_packm_int( a, a_pack, cntl_sub_packm_a( cntl ), trmm_thread_sub_opackm( thread ) );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( *b );

    dim_t start, end;
    num_t datatype = bli_obj_execution_datatype( *a );
    bli_get_range( thread, 0, n_trans,
                   bli_lcm( bli_info_get_default_nr( datatype ), bli_info_get_default_mr( datatype ) ),
                   &start, &end );

    // Partition along the n dimension.
    for ( i = start; i < end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, end, b, cntl_blocksize( cntl ) );

        // Acquire partitions for B1 and C1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, b, &b1 );
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing B1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &b1, b1_pack, cntl_sub_packm_b( cntl ) );
            bli_packm_init( &c1, c1_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack B1 (if instructed).
        bli_packm_int( &b1, b1_pack, cntl_sub_packm_b( cntl ), trsm_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntl_sub_packm_c( cntl ), trsm_thread_sub_ipackm( thread ) );

        // Perform trsm subproblem.
        bli_trsm_int( &BLIS_ONE, a_pack, b1_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_trsm( cntl ), trsm_thread_sub_trsm( thread ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ), trsm_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_obj_release_pack( a_pack );
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_release_pack( b1_pack );
        bli_obj_release_pack( c1_pack );
    }
}
void bli_trmm_lu_blk_var1( obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, trmm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t b_alg;
    dim_t mT_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );
    bli_obj_init_pack( &c1_pack );

    // If A is [upper] triangular, use the diagonal offset of A to determine
    // the length of the non-zero region.
    if ( bli_obj_is_triangular( *a ) )
        mT_trans = bli_abs( bli_obj_diag_offset_after_trans( *a ) ) +
                   bli_obj_width_after_trans( *a );
    else // if ( bli_obj_is_general( *a ) )
        mT_trans = bli_obj_length_after_trans( *a );

    // Scale C by beta (if instructed).
    bli_scalm_int( beta, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Pack B and scale by alpha (if instructed).
    bli_packm_int( alpha, b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Partition along the m dimension.
    for ( i = 0; i < mT_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, mT_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 and scale by beta (if instructed).
        bli_packm_int( beta, &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform trmm subproblem.
        bli_trmm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack, beta, &c1_pack, cntl_sub_trmm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
    bli_obj_release_pack( &c1_pack );
}
void bli_trmm_blk_var2f( obj_t* a, obj_t* b, obj_t* c, gemm_t* cntl, trmm_thrinfo_t* thread )
{
    obj_t a_pack_s;
    obj_t b1_pack_s, c1_pack_s;

    obj_t b1, c1;
    obj_t* a_pack = NULL;
    obj_t* b1_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;

    // Prune any zero region that exists along the partitioning dimension.
    bli_trmm_prune_unref_mparts_n( a, b, c );

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing A.
        bli_obj_init_pack( &a_pack_s );
        bli_packm_init( a, &a_pack_s, cntl_sub_packm_a( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    a_pack = thread_obroadcast( thread, &a_pack_s );

    // Initialize pack objects for B and C that are passed into packm_init().
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &b1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    b1_pack = thread_ibroadcast( thread, &b1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack A (if instructed).
    bli_packm_int( a, a_pack, cntl_sub_packm_a( cntl ), trmm_thread_sub_opackm( thread ) );

    dim_t my_start, my_end;
    bli_get_range_weighted_l2r( thread, b,
                                bli_blksz_get_mult_for_obj( b, cntl_blocksize( cntl ) ),
                                &my_start, &my_end );

    // Partition along the n dimension.
    for ( i = my_start; i < my_end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, my_end, b, cntl_blocksize( cntl ) );

        // Acquire partitions for B1 and C1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, b, &b1 );
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing B1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &b1, b1_pack, cntl_sub_packm_b( cntl ) );
            bli_packm_init( &c1, c1_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack B1 (if instructed).
        bli_packm_int( &b1, b1_pack, cntl_sub_packm_b( cntl ), trmm_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntl_sub_packm_c( cntl ), trmm_thread_sub_ipackm( thread ) );

        // Perform trmm subproblem.
        bli_trmm_int( &BLIS_ONE, a_pack, b1_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), trmm_thread_sub_trmm( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ), trmm_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( a_pack, cntl_sub_packm_a( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        bli_packm_release( b1_pack, cntl_sub_packm_b( cntl ) );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_gemv_blk_var1( obj_t* alpha, obj_t* a, obj_t* x, obj_t* beta, obj_t* y, gemv_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t y1, y1_pack;

    dim_t m_trans;
    dim_t i;
    dim_t b_alg;

    // Initialize objects for packing.
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &y1_pack );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *a );

    // Partition along the m dimension.
    for ( i = 0; i < m_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and y1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, i, b_alg, y, &y1 );

        // Initialize objects for packing A1 and y1 (if needed).
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packv_init( &y1, &y1_pack, cntl_sub_packv_y( cntl ) );

        // Copy/pack A1, y1 (if needed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ), &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &y1, &y1_pack, cntl_sub_packv_y( cntl ) );

        // y1 = beta * y1 + alpha * A1 * x;
        bli_gemv_int( BLIS_NO_TRANSPOSE, BLIS_NO_CONJUGATE,
                      alpha, &a1_pack, x, beta, &y1_pack,
                      cntl_sub_gemv( cntl ) );

        // Copy/unpack y1 (if y1 was packed).
        bli_unpackv_int( &y1_pack, &y1, cntl_sub_unpackv_y( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_packm_release( &a1_pack, cntl_sub_packm_a( cntl ) );
    bli_packv_release( &y1_pack, cntl_sub_packv_y( cntl ) );
}
void bli_gemm_blk_var4f( obj_t* a, obj_t* b, obj_t* c, gemm_t* cntl, gemm_thrinfo_t* thread )
{
    extern packm_t* gemm3mh_packa_cntl_ro;
    extern packm_t* gemm3mh_packa_cntl_io;
    extern packm_t* gemm3mh_packa_cntl_rpi;

    packm_t* packa_cntl_ro  = gemm3mh_packa_cntl_ro;
    packm_t* packa_cntl_io  = gemm3mh_packa_cntl_io;
    packm_t* packa_cntl_rpi = gemm3mh_packa_cntl_rpi;

    // The _s suffix denotes objects that live on the stack.
    obj_t b_pack_s;
    obj_t a1_pack_s, c1_pack_s;

    obj_t a1, c1;
    obj_t* a1_pack = NULL;
    obj_t* b_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;
    dim_t m_trans;

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing B.
        bli_obj_init_pack( &b_pack_s );
        bli_packm_init( b, &b_pack_s, cntl_sub_packm_b( cntl ) );

        // Scale C by beta (if instructed).
        // Since scalm doesn't support multithreading yet, this must be done
        // by the chief thread.
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    b_pack = thread_obroadcast( thread, &b_pack_s );

    // Initialize objects passed into bli_packm_init() for A and C.
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack B (if instructed).
    bli_packm_int( b, b_pack, cntl_sub_packm_b( cntl ), gemm_thread_sub_opackm( thread ) );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *a );

    dim_t start, end;
    bli_get_range_t2b( thread, 0, m_trans,
                       bli_blksz_get_mult_for_obj( a, cntl_blocksize( cntl ) ),
                       &start, &end );

    // Partition along the m dimension.
    for ( i = start; i < end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        // NOTE: Use of a (for execution datatype) is intentional!
        // This causes the right blocksize to be used if c and a are
        // complex and b is real.
        b_alg = bli_determine_blocksize_f( i, end, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, packa_cntl_ro );
            bli_packm_init( &c1, c1_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, packa_cntl_ro, gemm_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntl_sub_packm_c( cntl ), gemm_thread_sub_ipackm( thread ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), gemm_thread_sub_gemm( thread ) );
        thread_ibarrier( thread );

        // Only apply beta within the first of three subproblems.
        if ( thread_am_ichief( thread ) ) bli_obj_scalar_reset( c1_pack );

        // Initialize object for re-packing A1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, packa_cntl_io );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, packa_cntl_io, gemm_thread_sub_ipackm( thread ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), gemm_thread_sub_gemm( thread ) );
        thread_ibarrier( thread );

        // Initialize object for re-packing A1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, packa_cntl_rpi );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, packa_cntl_rpi, gemm_thread_sub_ipackm( thread ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), gemm_thread_sub_gemm( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed). Currently this must be done by a
        // single thread.
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ), gemm_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( b_pack, cntl_sub_packm_b( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        // It doesn't matter which packm cntl node we pass in; as long
        // as it is valid, packm_release() will release the mem_t entry.
        bli_packm_release( a1_pack, packa_cntl_ro );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_trsm_l_blk_var4( obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, trsm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1;

    dim_t i;
    dim_t bm_alg;
    dim_t m_trans;
    dim_t offB;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *a );

    // Use the diagonal offset of A to skip over the zero region.
    offB = bli_abs( bli_obj_diag_offset_after_trans( *a ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Fuse the first iteration with incremental packing and computation.
    {
        obj_t b_inc, b_pack_inc;
        obj_t c1_inc;

        dim_t j;
        dim_t bn_inc;
        dim_t n_trans;

        // Query dimension in partitioning direction.
        n_trans = bli_obj_width( b_pack );

        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_f( offB, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, offB, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, offB, bm_alg, c, &c1 );

        // Initialize object for packing A1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Partition along the n dimension.
        for ( j = 0; j < n_trans; j += bn_inc )
        {
            // Determine the current incremental packing blocksize.
            bn_inc = bli_determine_blocksize_f( j, n_trans, b, cntl_blocksize_aux( cntl ) );

            // Acquire partitions.
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, b, &b_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &b_pack, &b_pack_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &c1, &c1_inc );

            // Pack B1 and scale by alpha (if instructed).
            bli_packm_int( alpha, &b_inc, &b_pack_inc, cntl_sub_packm_b( cntl ) );

            // Perform trsm subproblem.
            bli_trsm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack_inc, beta, &c1_inc, cntl_sub_trsm( cntl ) );
        }

        // Unpack B to the corresponding region of C. (Note that B and C1 are
        // conformal since A1 is square.)
        //bli_unpackm_int( &b_pack, &c1,
        //                 cntl_sub_unpackm_c( cntl ) );
    }

    // Partition along the remaining portion of the m dimension.
    for ( i = offB + bm_alg; i < m_trans; i += bm_alg )
    {
        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, bm_alg, c, &c1 );

        // Initialize object for packing A1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Perform trsm subproblem.
        if ( bli_obj_intersects_diag( a1_pack ) )
            bli_trsm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack, beta, &c1, cntl_sub_trsm( cntl ) );
        else
            bli_gemm_int( &BLIS_MINUS_ONE, &a1_pack, &b_pack, &BLIS_ONE, &c1, cntl_sub_gemm( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
}
void bli_trmm_blk_var1f( obj_t* a, obj_t* b, obj_t* c, gemm_t* cntl, trmm_thrinfo_t* thread )
{
    obj_t b_pack_s;
    obj_t a1_pack_s, c1_pack_s;

    obj_t a1, c1;
    obj_t* a1_pack = NULL;
    obj_t* b_pack = NULL;
    obj_t* c1_pack = NULL;

    dim_t i;
    dim_t b_alg;

    // Prune any zero region that exists along the partitioning dimension.
    bli_trmm_prune_unref_mparts_m( a, b, c );

    if ( thread_am_ochief( thread ) )
    {
        // Initialize object for packing B.
        bli_obj_init_pack( &b_pack_s );
        bli_packm_init( b, &b_pack_s, cntl_sub_packm_b( cntl ) );

        // Scale C by beta (if instructed).
        // Since scalm doesn't support multithreading yet, this must be done
        // by the chief thread.
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    b_pack = thread_obroadcast( thread, &b_pack_s );

    // Initialize all pack objects that are passed into packm_init().
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

    // Pack B (if instructed).
    bli_packm_int( b, b_pack, cntl_sub_packm_b( cntl ), trmm_thread_sub_opackm( thread ) );

    // Set the default length of and offset to the non-zero part of A.
    //m_trans = bli_obj_length_after_trans( *a );
    //offA    = 0;

    // If A is lower triangular, we have to adjust where the non-zero part of
    // A begins. If A is upper triangular, we have to adjust the length of
    // the non-zero part. If A is general/dense, then we keep the defaults.
    //if      ( bli_obj_is_lower( *a ) )
    //    offA    = bli_abs( bli_obj_diag_offset_after_trans( *a ) );
    //else if ( bli_obj_is_upper( *a ) )
    //    m_trans = bli_abs( bli_obj_diag_offset_after_trans( *a ) ) +
    //              bli_obj_width_after_trans( *a );

    dim_t my_start, my_end;
    bli_get_range_weighted_t2b( thread, a,
                                bli_blksz_get_mult_for_obj( a, cntl_blocksize( cntl ) ),
                                &my_start, &my_end );

    // Partition along the m dimension.
    for ( i = my_start; i < my_end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, my_end, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, cntl_sub_packm_a( cntl ) );
            bli_packm_init( &c1, c1_pack, cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, cntl_sub_packm_a( cntl ), trmm_thread_sub_ipackm( thread ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, c1_pack, cntl_sub_packm_c( cntl ), trmm_thread_sub_ipackm( thread ) );

        // Perform trmm subproblem.
        bli_trmm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, c1_pack,
                      cntl_sub_gemm( cntl ), trmm_thread_sub_trmm( thread ) );
        thread_ibarrier( thread );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( c1_pack, &c1, cntl_sub_unpackm_c( cntl ), trmm_thread_sub_ipackm( thread ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( b_pack, cntl_sub_packm_b( cntl ) );
    if ( thread_am_ichief( thread ) )
    {
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
void bli_trsm_blk_var1f( obj_t* a, obj_t* b, obj_t* c, trsm_t* cntl, trsm_thrinfo_t* thread )
{
    obj_t b_pack_s;
    obj_t a1_pack_s;

    obj_t a1, c1;
    obj_t* b_pack = NULL;
    obj_t* a1_pack = NULL;

    dim_t i;
    dim_t b_alg;
    dim_t m_trans;
    dim_t offA;

    // Initialize object for packing B.
    if ( thread_am_ochief( thread ) )
    {
        bli_obj_init_pack( &b_pack_s );
        bli_packm_init( b, &b_pack_s, cntl_sub_packm_b( cntl ) );
    }
    b_pack = thread_obroadcast( thread, &b_pack_s );

    // Initialize pack object for A1.
    if ( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );

    // Pack B (if instructed).
    bli_packm_int( b, b_pack, cntl_sub_packm_b( cntl ), trsm_thread_sub_opackm( thread ) );

    // Set the default length of and offset to the non-zero part of A.
    m_trans = bli_obj_length_after_trans( *a );
    offA    = 0;

    // If A is lower triangular, we have to adjust where the non-zero part of
    // A begins.
    if ( bli_obj_is_lower( *a ) )
        offA = bli_abs( bli_obj_diag_offset_after_trans( *a ) );

    dim_t start, end;
    num_t dt = bli_obj_execution_datatype( *a );
    bli_get_range_t2b( thread, offA, m_trans,
                       //bli_lcm( bli_info_get_default_nr( BLIS_TRSM, dt ), bli_info_get_default_mr( BLIS_TRSM, dt ) ),
                       bli_info_get_default_mc( BLIS_TRSM, dt ),
                       &start, &end );

    // Partition along the remaining portion of the m dimension.
    for ( i = start; i < end; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, end, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize object for packing A1.
        if ( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, cntl_sub_packm_a( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, cntl_sub_packm_a( cntl ), trsm_thread_sub_ipackm( thread ) );

        // Perform trsm subproblem.
        bli_trsm_int( &BLIS_ONE, a1_pack, b_pack, &BLIS_ONE, &c1,
                      cntl_sub_trsm( cntl ), trsm_thread_sub_trsm( thread ) );
        thread_ibarrier( thread );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    thread_obarrier( thread );
    if ( thread_am_ochief( thread ) )
        bli_packm_release( b_pack, cntl_sub_packm_b( cntl ) );
    if ( thread_am_ichief( thread ) )
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
}
void bli_trsm_u_blk_var4( obj_t* alpha, obj_t* a, obj_t* b, obj_t* beta, obj_t* c, trsm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1;

    dim_t i;
    dim_t bm_alg;
    dim_t m_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *a );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Find the offset to the first non-zero block of A.
    for ( i = 0; i < m_trans; i += bm_alg )
    {
        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_b( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partition for A1.
        bli_acquire_mpart_b2t( BLIS_SUBPART1, i, bm_alg, a, &a1 );

        if ( !bli_obj_is_zeros( a1 ) ) break;
    }

    // Fuse the first iteration with incremental packing and computation.
    {
        obj_t b_inc, b_pack_inc;
        obj_t c1_inc;

        dim_t j;
        dim_t bn_inc;
        dim_t n_trans;

        // Query dimension in partitioning direction.
        n_trans = bli_obj_width( b_pack );

        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_b( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_b2t( BLIS_SUBPART1, i, bm_alg, a, &a1 );
        bli_acquire_mpart_b2t( BLIS_SUBPART1, i, bm_alg, c, &c1 );

        // Initialize object for packing A1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Partition along the n dimension.
        for ( j = 0; j < n_trans; j += bn_inc )
        {
            // Determine the current incremental packing blocksize.
            bn_inc = bli_determine_blocksize_f( j, n_trans, b, cntl_blocksize_aux( cntl ) );

            // Acquire partitions.
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, b, &b_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &b_pack, &b_pack_inc );
            bli_acquire_mpart_l2r( BLIS_SUBPART1, j, bn_inc, &c1, &c1_inc );

            // Pack B1 and scale by alpha (if instructed).
            bli_packm_int( alpha, &b_inc, &b_pack_inc, cntl_sub_packm_b( cntl ) );

            // Perform trsm subproblem.
            bli_trsm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack_inc, beta, &c1_inc, cntl_sub_trsm( cntl ) );
        }
    }

    // Partition along the remaining portion of the m dimension.
    for ( i = i + bm_alg; i < m_trans; i += bm_alg )
    {
        // Determine the current algorithmic blocksize.
        bm_alg = bli_determine_blocksize_b( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_b2t( BLIS_SUBPART1, i, bm_alg, a, &a1 );
        bli_acquire_mpart_b2t( BLIS_SUBPART1, i, bm_alg, c, &c1 );

        // Initialize object for packing A1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        if ( bli_obj_intersects_diag( a1_pack ) )
            bli_trsm_int( BLIS_LEFT, alpha, &a1_pack, &b_pack, beta, &c1, cntl_sub_trsm( cntl ) );
        else
            bli_gemm_int( &BLIS_MINUS_ONE, &a1_pack, &b_pack, &BLIS_ONE, &c1, cntl_sub_gemm( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
}
void bli_trmv_u_blk_var1( obj_t* alpha, obj_t* a, obj_t* x, trmv_t* cntl )
{
    obj_t a11, a11_pack;
    obj_t a12;
    obj_t x1, x1_pack;
    obj_t x2;

    dim_t mn;
    dim_t ij;
    dim_t b_alg;

    // Initialize objects for packing.
    bli_obj_init_pack( &a11_pack );
    bli_obj_init_pack( &x1_pack );

    // Query dimension.
    mn = bli_obj_length( *a );

    // Partition diagonally.
    for ( ij = 0; ij < mn; ij += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( ij, mn, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A11, A12, x1, and x2.
        bli_acquire_mpart_tl2br( BLIS_SUBPART11, ij, b_alg, a, &a11 );
        bli_acquire_mpart_tl2br( BLIS_SUBPART12, ij, b_alg, a, &a12 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, ij, b_alg, x, &x1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART2, ij, b_alg, x, &x2 );

        // Initialize objects for packing A11 and x1 (if needed).
        bli_packm_init( &a11, &a11_pack, cntl_sub_packm_a11( cntl ) );
        bli_packv_init( &x1, &x1_pack, cntl_sub_packv_x1( cntl ) );

        // Copy/pack A11, x1 (if needed).
        bli_packm_int( &a11, &a11_pack, cntl_sub_packm_a11( cntl ), &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &x1, &x1_pack, cntl_sub_packv_x1( cntl ) );

        // x1 = alpha * triu( A11 ) * x1;
        bli_trmv_int( alpha, &a11_pack, &x1_pack, cntl_sub_trmv( cntl ) );

        // x1 = x1 + alpha * A12 * x2;
        bli_gemv_int( BLIS_NO_TRANSPOSE, BLIS_NO_CONJUGATE,
                      alpha, &a12, &x2, &BLIS_ONE, &x1_pack,
                      cntl_sub_gemv_rp( cntl ) );

        // Copy/unpack x1 (if x1 was packed).
        bli_unpackv_int( &x1_pack, &x1, cntl_sub_unpackv_x1( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_packm_release( &a11_pack, cntl_sub_packm_a11( cntl ) );
    bli_packv_release( &x1_pack, cntl_sub_packv_x1( cntl ) );
}
void bli_herk_blk_var1f( obj_t* a, obj_t* ah, obj_t* c, herk_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t ah_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t b_alg;
    dim_t m_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &ah_pack );
    bli_obj_init_pack( &c1_pack );

    // Query dimension in partitioning direction.
    m_trans = bli_obj_length_after_trans( *c );

    // Scale C by beta (if instructed).
    bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing A'.
    bli_packm_init( ah, &ah_pack, cntl_sub_packm_b( cntl ) );

    // Pack A' (if instructed).
    bli_packm_int( ah, &ah_pack, cntl_sub_packm_b( cntl ) );

    // Partition along the m dimension.
    for ( i = 0; i < m_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 (if instructed).
        bli_packm_int( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform herk subproblem.
        bli_herk_int( &BLIS_ONE, &a1_pack, &ah_pack, &BLIS_ONE, &c1_pack,
                      cntl_sub_herk( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &ah_pack );
    bli_obj_release_pack( &c1_pack );
}
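// NOTE: The following is an illustrative sketch, not BLIS code. It shows the
// operation that the herk subproblems above compute, in its simplest real,
// lower-triangle form: only the stored triangle of C is read or written. The
// helper name herk_l_sketch() is hypothetical; dense double-precision
// column-major storage is assumed.
static void herk_l_sketch( int n, int k,
                           const double* A, int lda,
                           double* C, int ldc )
{
    // C := C + A * A^T, updating only the lower triangle (i >= j).
    for ( int j = 0; j < n; ++j )
        for ( int i = j; i < n; ++i )
        {
            double s = 0.0;

            for ( int p = 0; p < k; ++p )
                s += A[ i + p * lda ] * A[ j + p * lda ];

            C[ i + j * ldc ] += s;
        }
}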
void bli_gemm_blk_var3f( obj_t* a, obj_t* b, obj_t* c, gemm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b1, b1_pack;
    obj_t c_pack;

    dim_t i;
    dim_t b_alg;
    dim_t k_trans;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b1_pack );
    bli_obj_init_pack( &c_pack );

    // Query dimension in partitioning direction.
    k_trans = bli_obj_width_after_trans( *a );

    // Scale C by beta (if instructed).
    bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing C.
    bli_packm_init( c, &c_pack, cntl_sub_packm_c( cntl ) );

    // Pack C (if instructed).
    bli_packm_int( c, &c_pack, cntl_sub_packm_c( cntl ) );

    // Partition along the k dimension.
    for ( i = 0; i < k_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        // NOTE: Use of b (for execution datatype) is intentional!
        // This causes the right blocksize to be used if c and a are
        // complex and b is real.
        b_alg = bli_determine_blocksize_f( i, k_trans, b, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and B1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, b, &b1 );

        // Initialize objects for packing A1 and B1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &b1, &b1_pack, cntl_sub_packm_b( cntl ) );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack B1 (if instructed).
        bli_packm_int( &b1, &b1_pack, cntl_sub_packm_b( cntl ) );

        // Perform gemm subproblem.
        bli_gemm_int( &BLIS_ONE, &a1_pack, &b1_pack, &BLIS_ONE, &c_pack,
                      cntl_sub_gemm( cntl ) );

        // This variant executes multiple rank-k updates. Therefore, if the
        // internal beta scalar on matrix C is non-zero, we must use it
        // only for the first iteration (and then BLIS_ONE for all others).
        // And since c_pack is a local obj_t, we can simply overwrite the
        // internal beta scalar with BLIS_ONE once it has been used in the
        // first iteration.
        if ( i == 0 ) bli_obj_scalar_reset( &c_pack );
    }

    // Unpack C (if C was packed).
    bli_unpackm_int( &c_pack, c, cntl_sub_unpackm_c( cntl ) );

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b1_pack );
    bli_obj_release_pack( &c_pack );
}
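// NOTE: The following is an illustrative sketch, not BLIS code. It shows the
// accumulation pattern behind the scalar reset above: a k-partitioned gemm is
// a sequence of rank-bk updates, and beta may be folded into C exactly once
// (on the first panel); every later panel must accumulate with an effective
// beta of one. The helper name gemm_rank_k_sketch() is hypothetical; dense
// double-precision column-major storage is assumed.
static void gemm_rank_k_sketch( int m, int n, int k, int bk,
                                double beta,
                                const double* A, int lda,
                                const double* B, int ldb,
                                double*       C, int ldc )
{
    // C := beta * C + A * B, computed one k-panel at a time.
    for ( int p = 0; p < k; p += bk )
    {
        int    pb     = ( k - p < bk ? k - p : bk );
        double beta_p = ( p == 0 ? beta : 1.0 );

        for ( int j = 0; j < n; ++j )
            for ( int i = 0; i < m; ++i )
            {
                double s = 0.0;

                for ( int l = 0; l < pb; ++l )
                    s += A[ i + ( p + l ) * lda ] * B[ ( p + l ) + j * ldb ];

                C[ i + j * ldc ] = beta_p * C[ i + j * ldc ] + s;
            }
    }
}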
void bli_herk_blk_var3f( obj_t* a, obj_t* ah, obj_t* c,
                         herk_t* cntl, herk_thrinfo_t* thread )
{
    obj_t  c_pack_s;
    obj_t  a1_pack_s, ah1_pack_s;

    obj_t  a1, ah1;
    obj_t* a1_pack  = NULL;
    obj_t* ah1_pack = NULL;
    obj_t* c_pack   = NULL;

    dim_t i;
    dim_t b_alg;
    dim_t k_trans;

    if( thread_am_ochief( thread ) )
    {
        // Initialize object for packing C.
        bli_obj_init_pack( &c_pack_s );
        bli_packm_init( c, &c_pack_s, cntl_sub_packm_c( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE, c, cntl_sub_scalm( cntl ) );
    }
    c_pack = thread_obroadcast( thread, &c_pack_s );

    // Initialize all pack objects that are passed into packm_init().
    if( thread_am_ichief( thread ) )
    {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &ah1_pack_s );
    }
    a1_pack  = thread_ibroadcast( thread, &a1_pack_s );
    ah1_pack = thread_ibroadcast( thread, &ah1_pack_s );

    // Pack C (if instructed).
    bli_packm_int( c, c_pack, cntl_sub_packm_c( cntl ),
                   herk_thread_sub_opackm( thread ) );

    // Query dimension in partitioning direction.
    k_trans = bli_obj_width_after_trans( *a );

    // Partition along the k dimension.
    for ( i = 0; i < k_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, k_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and A1'.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, ah, &ah1 );

        // Initialize objects for packing A1 and A1'.
        if( thread_am_ichief( thread ) )
        {
            bli_packm_init( &a1, a1_pack, cntl_sub_packm_a( cntl ) );
            bli_packm_init( &ah1, ah1_pack, cntl_sub_packm_b( cntl ) );
        }
        thread_ibarrier( thread );

        // Pack A1 (if instructed).
        bli_packm_int( &a1, a1_pack, cntl_sub_packm_a( cntl ),
                       herk_thread_sub_ipackm( thread ) );

        // Pack B1 (if instructed).
        bli_packm_int( &ah1, ah1_pack, cntl_sub_packm_b( cntl ),
                       herk_thread_sub_ipackm( thread ) );

        // Perform herk subproblem.
        bli_herk_int( &BLIS_ONE, a1_pack, ah1_pack, &BLIS_ONE, c_pack,
                      cntl_sub_herk( cntl ), herk_thread_sub_herk( thread ) );

        // This variant executes multiple rank-k updates. Therefore, if the
        // internal beta scalar on matrix C is non-zero, we must use it
        // only for the first iteration (and then BLIS_ONE for all others).
        // And since c_pack is a local obj_t, we can simply overwrite the
        // internal beta scalar with BLIS_ONE once it has been used in the
        // first iteration.
        if ( i == 0 ) thread_ibarrier( thread );
        if ( i == 0 && thread_am_ichief( thread ) )
            bli_obj_scalar_reset( c_pack );
    }
    thread_obarrier( thread );

    // Unpack C (if C was packed).
    bli_unpackm_int( c_pack, c, cntl_sub_unpackm_c( cntl ),
                     herk_thread_sub_opackm( thread ) );

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    if( thread_am_ochief( thread ) )
    {
        bli_obj_release_pack( c_pack );
    }
    if( thread_am_ichief( thread ) )
    {
        bli_obj_release_pack( a1_pack );
        bli_obj_release_pack( ah1_pack );
    }
}
void bli_ger_blk_var2( obj_t*  alpha,
                       obj_t*  x,
                       obj_t*  y,
                       obj_t*  a,
                       cntx_t* cntx,
                       ger_t*  cntl )
{
    obj_t a1, a1_pack;
    obj_t y1, y1_pack;

    dim_t i;
    dim_t b_alg;
    dim_t n_trans;

    // Initialize objects for packing.
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &y1_pack );

    // Query dimension in partitioning direction.
    n_trans = bli_obj_width_after_trans( *a );

    // Partition along the n dimension.
    for ( i = 0; i < n_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, n_trans, a,
                                           bli_cntl_bszid( cntl ), cntx );

        // Acquire partitions for A1 and y1.
        bli_acquire_mpart_l2r( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_vpart_f2b( BLIS_SUBPART1, i, b_alg, y, &y1 );

        // Initialize objects for packing A1 and y1 (if needed).
        bli_packm_init( &a1, &a1_pack, cntx, bli_cntl_sub_packm_a( cntl ) );
        bli_packv_init( &y1, &y1_pack, cntx, bli_cntl_sub_packv_y( cntl ) );

        // Copy/pack A1, y1 (if needed).
        bli_packm_int( &a1, &a1_pack, cntx, bli_cntl_sub_packm_a( cntl ),
                       &BLIS_PACKM_SINGLE_THREADED );
        bli_packv_int( &y1, &y1_pack, cntx, bli_cntl_sub_packv_y( cntl ) );

        // A1 = A1 + alpha * x * y1;
        bli_ger_int( BLIS_NO_CONJUGATE, BLIS_NO_CONJUGATE,
                     alpha, x, &y1_pack, &a1_pack,
                     cntx, bli_cntl_sub_ger( cntl ) );

        // Copy/unpack A1 (if A1 was packed).
        bli_unpackm_int( &a1_pack, &a1, cntx, bli_cntl_sub_unpackm_a( cntl ),
                         &BLIS_PACKM_SINGLE_THREADED );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_packm_release( &a1_pack, bli_cntl_sub_packm_a( cntl ) );
    bli_packv_release( &y1_pack, bli_cntl_sub_packv_y( cntl ) );
}
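// NOTE: The following is an illustrative sketch, not BLIS code. It shows the
// operation each subproblem above performs: a rank-1 update of a group of
// columns, A1 := A1 + alpha * x * y1^T. The helper name ger_sketch() is
// hypothetical; real arithmetic (no conjugation) and dense double-precision
// column-major storage are assumed. Partitioning along n, as in the variant
// above, simply hands contiguous column panels (A1, y1) to this same update.
static void ger_sketch( int m, int n, double alpha,
                        const double* x, const double* y,
                        double* A, int lda )
{
    // A := A + alpha * x * y^T.
    for ( int j = 0; j < n; ++j )
        for ( int i = 0; i < m; ++i )
            A[ i + j * lda ] += alpha * x[ i ] * y[ j ];
}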
void bli_trmm_blk_var1( obj_t*  alpha,
                        obj_t*  a,
                        obj_t*  b,
                        obj_t*  beta,
                        obj_t*  c,
                        trmm_t* cntl )
{
    obj_t a1, a1_pack;
    obj_t b_pack;
    obj_t c1, c1_pack;

    dim_t i;
    dim_t b_alg;
    dim_t m_trans;
    dim_t offA;

    // Initialize all pack objects that are passed into packm_init().
    bli_obj_init_pack( &a1_pack );
    bli_obj_init_pack( &b_pack );
    bli_obj_init_pack( &c1_pack );

    // Set the default length of and offset to the non-zero part of A.
    m_trans = bli_obj_length_after_trans( *a );
    offA    = 0;

    // If A is lower triangular, we have to adjust where the non-zero part of
    // A begins. If A is upper triangular, we have to adjust the length of
    // the non-zero part. If A is general/dense, then we keep the defaults.
    if ( bli_obj_is_lower( *a ) )
        offA    = bli_abs( bli_obj_diag_offset_after_trans( *a ) );
    else if ( bli_obj_is_upper( *a ) )
        m_trans = bli_abs( bli_obj_diag_offset_after_trans( *a ) ) +
                  bli_obj_width_after_trans( *a );

    // Scale C by beta (if instructed).
    bli_scalm_int( beta, c, cntl_sub_scalm( cntl ) );

    // Initialize object for packing B.
    bli_packm_init( b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Pack B and scale by alpha (if instructed).
    bli_packm_int( alpha, b, &b_pack, cntl_sub_packm_b( cntl ) );

    // Partition along the m dimension.
    for ( i = offA; i < m_trans; i += b_alg )
    {
        // Determine the current algorithmic blocksize.
        b_alg = bli_determine_blocksize_f( i, m_trans, a, cntl_blocksize( cntl ) );

        // Acquire partitions for A1 and C1.
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, a, &a1 );
        bli_acquire_mpart_t2b( BLIS_SUBPART1, i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        bli_packm_init( &a1, &a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_init( &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Pack A1 and scale by alpha (if instructed).
        bli_packm_int( alpha, &a1, &a1_pack, cntl_sub_packm_a( cntl ) );

        // Pack C1 and scale by beta (if instructed).
        bli_packm_int( beta, &c1, &c1_pack, cntl_sub_packm_c( cntl ) );

        // Perform trmm subproblem.
        bli_trmm_int( alpha, &a1_pack, &b_pack, beta, &c1_pack,
                      cntl_sub_trmm( cntl ) );

        // Unpack C1 (if C1 was packed).
        bli_unpackm_int( &c1_pack, &c1, cntl_sub_unpackm_c( cntl ) );
    }

    // If any packing buffers were acquired within packm, release them back
    // to the memory manager.
    bli_obj_release_pack( &a1_pack );
    bli_obj_release_pack( &b_pack );
    bli_obj_release_pack( &c1_pack );
}
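// NOTE: The following is an illustrative sketch, not BLIS code. It motivates
// the offA / m_trans bookkeeping above: a block row of A that lies entirely
// in the zero region contributes nothing beyond beta * C1, which is why the
// variant scales all of C by beta up front and then visits only block rows
// that intersect the structurally non-zero part of A. The helper name
// trmm_ll_sketch() is hypothetical; dense double-precision column-major
// storage and a lower-triangular A with zero diagonal offset are assumed.
static void trmm_ll_sketch( int m, int n, double alpha, double beta,
                            const double* A, int lda,
                            const double* B, int ldb,
                            double*       C, int ldc )
{
    // C := beta * C + alpha * tril(A) * B, with A m x m lower triangular.
    // Row i of A contributes only its first i+1 entries.
    for ( int j = 0; j < n; ++j )
        for ( int i = 0; i < m; ++i )
        {
            double s = 0.0;

            for ( int p = 0; p <= i; ++p )
                s += A[ i + p * lda ] * B[ p + j * ldb ];

            C[ i + j * ldc ] = beta * C[ i + j * ldc ] + alpha * s;
        }
}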