Example 1
void bli_her2k_u_ker_var2( obj_t*   a,
                           obj_t*   bh,
                           obj_t*   b,
                           obj_t*   ah,
                           obj_t*   c,
                           her2k_t* cntl )
{
	herk_t herk_cntl;
	obj_t  c_local;

	// Implement her2k kernel in terms of two calls to the corresponding
	// herk kernel.

	// Note we have to use BLIS_ONE for the second rank-k product since we
	// only want to apply beta once. (And beta might be unit anyway if this
	// is not the first iteration of variant 3.)

	cntl_gemm_ukrs( (&herk_cntl) ) = cntl_gemm_ukrs( cntl );

	bli_obj_alias_to( *c, c_local );

	bli_herk_u_ker_var2( a,
	                     bh,
	                     &c_local,
	                     &herk_cntl );

	bli_obj_scalar_reset( &c_local );

	bli_herk_u_ker_var2( b,
	                     ah,
	                     &c_local,
	                     &herk_cntl );
}
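
For reference, the update being assembled here is the standard her2k operation (the alpha scalars travel on the operand objects, which is why they do not appear as explicit arguments):

  C := beta*C + alpha*A*B^H + conj(alpha)*B*A^H

The first herk-style call applies beta to C; resetting the scalar on c_local then makes the second call accumulate with a unit scalar, so beta is applied exactly once.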
Example 2
void blx_gemm_blk_var3
     (
       obj_t*  a,
       obj_t*  b,
       obj_t*  c,
       cntx_t* cntx,
       rntm_t* rntm,
       cntl_t* cntl,
       thrinfo_t* thread
     )
{
	obj_t a1, b1;
	dim_t i;
	dim_t b_alg;
	dim_t k_trans;

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( a );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		b_alg = blx_determine_blocksize_f( i, k_trans, c,
		                                   bli_cntl_bszid( cntl ), cntx );

		// Acquire partitions for A1 and B1.
		bli_acquire_mpart_ndim( BLIS_FWD, BLIS_SUBPART1, i, b_alg, a, &a1 );
		bli_acquire_mpart_mdim( BLIS_FWD, BLIS_SUBPART1, i, b_alg, b, &b1 );

		// Perform gemm subproblem.
		blx_gemm_int
		(
		  &a1, &b1, c, cntx, rntm,
		  bli_cntl_sub_node( cntl ),
		  bli_thrinfo_sub_node( thread )
		);

		bli_thread_obarrier( bli_thrinfo_sub_node( thread ) );

		// This variant executes multiple rank-k updates. Therefore, if the
		// internal beta scalar on matrix C is non-zero, we must use it
		// only for the first iteration (and then BLIS_ONE for all others).
		// And since c is a locally aliased obj_t, we can simply overwrite
		// the internal beta scalar with BLIS_ONE once it has been used in
		// the first iteration. 
		if ( i == 0 ) bli_obj_scalar_reset( c );
	}
}
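
The loop above is the standard blocked-partitioning idiom: advance by the algorithmic blocksize and let the final iteration shrink to cover the remainder. Below is a minimal, self-contained sketch of the same idiom, with a hypothetical fixed maximum blocksize standing in for the context query.

#include <stdio.h>

#define B_MAX 4   /* hypothetical maximum algorithmic blocksize */

int main( void )
{
	int k = 10;   /* total dimension to partition */
	int b_alg;

	for ( int i = 0; i < k; i += b_alg )
	{
		/* The final block may be smaller than B_MAX. */
		b_alg = ( k - i < B_MAX ? k - i : B_MAX );

		printf( "iteration covers [%d, %d)\n", i, i + b_alg );
	}

	return 0;
}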
Example 3
void bli_gemm_blk_var3f( obj_t*  a,
                         obj_t*  b,
                         obj_t*  c,
                         gemm_t* cntl )
{
	obj_t  a1, a1_pack;
	obj_t  b1, b1_pack;
	obj_t  c_pack;

	dim_t  i;
	dim_t  b_alg;
	dim_t  k_trans;

	// Initialize all pack objects that are passed into packm_init().
	bli_obj_init_pack( &a1_pack );
	bli_obj_init_pack( &b1_pack );
	bli_obj_init_pack( &c_pack );

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( *a );

	// Scale C by beta (if instructed).
	bli_scalm_int( &BLIS_ONE,
	               c,
	               cntl_sub_scalm( cntl ) );

	// Initialize object for packing C.
	bli_packm_init( c, &c_pack,
	                cntl_sub_packm_c( cntl ) );

	// Pack C (if instructed).
	bli_packm_int( c, &c_pack,
	               cntl_sub_packm_c( cntl ) );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		// NOTE: Use of b (for execution datatype) is intentional!
		// This causes the right blocksize to be used if c and a are
		// complex and b is real.
		b_alg = bli_determine_blocksize_f( i, k_trans, b,
		                                   cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and B1.
		bli_acquire_mpart_l2r( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, b, &b1 );

		// Initialize objects for packing A1 and B1.
		bli_packm_init( &a1, &a1_pack,
		                cntl_sub_packm_a( cntl ) );
		bli_packm_init( &b1, &b1_pack,
		                cntl_sub_packm_b( cntl ) );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, &a1_pack,
		               cntl_sub_packm_a( cntl ) );

		// Pack B1 (if instructed).
		bli_packm_int( &b1, &b1_pack,
		               cntl_sub_packm_b( cntl ) );

		// Perform gemm subproblem.
		bli_gemm_int( &BLIS_ONE,
		              &a1_pack,
		              &b1_pack,
		              &BLIS_ONE,
		              &c_pack,
		              cntl_sub_gemm( cntl ) );

		// This variant executes multiple rank-k updates. Therefore, if the
		// internal beta scalar on matrix C is non-zero, we must use it
		// only for the first iteration (and then BLIS_ONE for all others).
		// And since c_pack is a local obj_t, we can simply overwrite the
		// internal beta scalar with BLIS_ONE once it has been used in the
		// first iteration.
		if ( i == 0 ) bli_obj_scalar_reset( &c_pack );
	}

	// Unpack C (if C was packed).
	bli_unpackm_int( &c_pack, c,
	                 cntl_sub_unpackm_c( cntl ) );

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
	bli_obj_release_pack( &a1_pack );
	bli_obj_release_pack( &b1_pack );
	bli_obj_release_pack( &c_pack );
}
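
The scalar-reset comment above describes a pattern that recurs in every variant in this section: beta participates only in the first rank-k update, and all later updates accumulate with a unit scalar. A scalar-arithmetic sketch of why this produces the right result:

#include <stdio.h>

int main( void )
{
	double c    = 5.0;
	double beta = 0.5;
	double a[3] = { 1.0, 2.0, 3.0 };
	double b[3] = { 4.0, 5.0, 6.0 };

	// c := beta*c + a[0]*b[0] + a[1]*b[1] + a[2]*b[2], computed as a
	// sequence of updates that each apply their own "beta".
	for ( int i = 0; i < 3; i++ )
	{
		double beta_i = ( i == 0 ? beta : 1.0 );  // use beta exactly once

		c = beta_i * c + a[i] * b[i];
	}

	printf( "c = %g\n", c );  // 0.5*5 + 4 + 10 + 18 = 34.5

	return 0;
}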
Example 4
void bli_trsm_blk_var3b( obj_t*  a,
                         obj_t*  b,
                         obj_t*  c,
                         trsm_t* cntl,
                         trsm_thrinfo_t* thread )
{
    obj_t  c_pack_s;
    obj_t  a1_pack_s, b1_pack_s;

    obj_t  a1, b1;
    obj_t* a1_pack = NULL;
    obj_t* b1_pack = NULL;
    obj_t* c_pack = NULL;

	dim_t  i;
	dim_t  b_alg;
	dim_t  k_trans;

	// Prune any zero region that exists along the partitioning dimension.
	bli_trsm_prune_unref_mparts_k( a, b, c );

	// Initialize pack objects for C that are passed into packm_init().
    if( thread_am_ochief( thread ) ) {
	    bli_obj_init_pack( &c_pack_s );

        // Initialize object for packing C.
        bli_packm_init( c, &c_pack_s,
                        cntl_sub_packm_c( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE,
                       c,
                       cntl_sub_scalm( cntl ) );
    }
    c_pack = thread_obroadcast( thread, &c_pack_s );

    if( thread_am_ichief( thread ) ) {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &b1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    b1_pack = thread_ibroadcast( thread, &b1_pack_s );

	// Pack C (if instructed).
	bli_packm_int( c, c_pack,
	               cntl_sub_packm_c( cntl ),
                   trsm_thread_sub_opackm( thread ) );

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( *a );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		// NOTE: We call a trsm-specific function to determine the kc
		// blocksize so that we can implement the "nudging" of kc to be
		// a multiple of mr, as needed.
		b_alg = bli_trsm_determine_kc_b( i, k_trans, b,
		                                 cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and B1.
		bli_acquire_mpart_r2l( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_b2t( BLIS_SUBPART1,
		                       i, b_alg, b, &b1 );

		// Initialize objects for packing A1 and B1.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            cntl_sub_packm_a( cntl ) );
            bli_packm_init( &b1, b1_pack,
                            cntl_sub_packm_b( cntl ) );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               cntl_sub_packm_a( cntl ),
                       trsm_thread_sub_ipackm( thread ) );

		// Pack B1 (if instructed).
		bli_packm_int( &b1, b1_pack,
		               cntl_sub_packm_b( cntl ),
                       trsm_thread_sub_ipackm( thread ) );

		// Perform trsm subproblem.
		bli_trsm_int( &BLIS_ONE,
		              a1_pack,
		              b1_pack,
		              &BLIS_ONE,
		              c_pack,
		              cntl_sub_trsm( cntl ),
                      trsm_thread_sub_trsm( thread ) );

		// This variant executes multiple rank-k updates. Therefore, if the
		// internal alpha scalars on A/B and C are non-zero, we must ensure
		// that they are only used in the first iteration.
        thread_ibarrier( thread );
		if ( i == 0 && thread_am_ichief( thread ) ) {
            bli_obj_scalar_reset( a );
            bli_obj_scalar_reset( b );
            bli_obj_scalar_reset( c_pack );
        }
	}

    thread_obarrier( thread );

	// Unpack C (if C was packed).
    bli_unpackm_int( c_pack, c,
                     cntl_sub_unpackm_c( cntl ),
                     trsm_thread_sub_opackm( thread ) );

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
    if( thread_am_ochief( thread ) ) {
	    bli_packm_release( c_pack, cntl_sub_packm_c( cntl ) );
    }
    if( thread_am_ichief( thread ) ) {
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_release( b1_pack, cntl_sub_packm_b( cntl ) );
    }
}
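
The "nudging" mentioned at the blocksize query rounds kc to a multiple of mr so that the packed panels stay aligned with the micro-kernel's register blocking. A minimal sketch with hypothetical values; rounding up is an assumption here, and the library's actual rounding policy may differ:

#include <stdio.h>

int main( void )
{
	int kc = 254;   // hypothetical default algorithmic blocksize
	int mr = 8;     // hypothetical micro-kernel register blocking

	// Round kc up to the nearest multiple of mr (assumed policy).
	int kc_nudged = ( ( kc + mr - 1 ) / mr ) * mr;

	printf( "kc: %d -> %d\n", kc, kc_nudged );  // prints "kc: 254 -> 256"

	return 0;
}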
Example 5
void bli_herk_blk_var3f( obj_t*  a,
                         obj_t*  ah,
                         obj_t*  c,
                         herk_t* cntl,
                         herk_thrinfo_t* thread )
{
    obj_t  c_pack_s;
    obj_t  a1_pack_s, ah1_pack_s;

	obj_t  a1, ah1;
    obj_t* a1_pack = NULL;
    obj_t* ah1_pack = NULL;
	obj_t* c_pack = NULL;

	dim_t  i;
	dim_t  b_alg;
	dim_t  k_trans;

    if( thread_am_ochief( thread ) ) {
        // Initialize object for packing C.
	    bli_obj_init_pack( &c_pack_s );
        bli_packm_init( c, &c_pack_s,
                        cntl_sub_packm_c( cntl ) );
        
        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE,
                       c,
                       cntl_sub_scalm( cntl ) );
    }
    c_pack = thread_obroadcast( thread, &c_pack_s );

	// Initialize all pack objects that are passed into packm_init().
    if( thread_am_ichief( thread ) ) {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &ah1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    ah1_pack = thread_ibroadcast( thread, &ah1_pack_s );

	// Pack C (if instructed).
	bli_packm_int( c, c_pack,
	               cntl_sub_packm_c( cntl ),
                   herk_thread_sub_opackm( thread ) );

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( *a );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		b_alg = bli_determine_blocksize_f( i, k_trans, a,
		                                   cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and A1'.
		bli_acquire_mpart_l2r( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, ah, &ah1 );

		// Initialize objects for packing A1 and A1'.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            cntl_sub_packm_a( cntl ) );
            bli_packm_init( &ah1, ah1_pack,
                            cntl_sub_packm_b( cntl ) );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               cntl_sub_packm_a( cntl ),
                       herk_thread_sub_ipackm( thread ) );

		// Pack A1' (if instructed).
		bli_packm_int( &ah1, ah1_pack,
		               cntl_sub_packm_b( cntl ),
                       herk_thread_sub_ipackm( thread ) );

		// Perform herk subproblem.
		bli_herk_int( &BLIS_ONE,
		              a1_pack,
		              ah1_pack,
		              &BLIS_ONE,
		              c_pack,
		              cntl_sub_herk( cntl ),
                      herk_thread_sub_herk( thread ) );

        // This variant executes multiple rank-k updates. Therefore, if the
        // internal beta scalar on matrix C is non-zero, we must use it
        // only for the first iteration (and then BLIS_ONE for all others).
        // And since c_pack is a local obj_t, we can simply overwrite the
        // internal beta scalar with BLIS_ONE once it has been used in the
        // first iteration.
        if ( i == 0 ) thread_ibarrier( thread );
        if ( i == 0 && thread_am_ichief( thread ) ) bli_obj_scalar_reset( c_pack );

	}

    thread_obarrier( thread );
    
	// Unpack C (if C was packed).
    bli_unpackm_int( c_pack, c,
                     cntl_sub_unpackm_c( cntl ),
                     herk_thread_sub_opackm( thread ) );

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
    if( thread_am_ochief( thread ) ) {
	    bli_obj_release_pack( c_pack );
    }
    if( thread_am_ichief( thread ) ) {
        bli_obj_release_pack( a1_pack );
        bli_obj_release_pack( ah1_pack );
    }
}
Example 6
void bli_packm_blk_var1( obj_t*   c,
                         obj_t*   p,
                         packm_thrinfo_t* t )
{
	num_t     dt_cp      = bli_obj_datatype( *c );

	struc_t   strucc     = bli_obj_struc( *c );
	doff_t    diagoffc   = bli_obj_diag_offset( *c );
	diag_t    diagc      = bli_obj_diag( *c );
	uplo_t    uploc      = bli_obj_uplo( *c );
	trans_t   transc     = bli_obj_conjtrans_status( *c );
	pack_t    schema     = bli_obj_pack_schema( *p );
	bool_t    invdiag    = bli_obj_has_inverted_diag( *p );
	bool_t    revifup    = bli_obj_is_pack_rev_if_upper( *p );
	bool_t    reviflo    = bli_obj_is_pack_rev_if_lower( *p );

	dim_t     m_p        = bli_obj_length( *p );
	dim_t     n_p        = bli_obj_width( *p );
	dim_t     m_max_p    = bli_obj_padded_length( *p );
	dim_t     n_max_p    = bli_obj_padded_width( *p );

	void*     buf_c      = bli_obj_buffer_at_off( *c );
	inc_t     rs_c       = bli_obj_row_stride( *c );
	inc_t     cs_c       = bli_obj_col_stride( *c );

	void*     buf_p      = bli_obj_buffer_at_off( *p );
	inc_t     rs_p       = bli_obj_row_stride( *p );
	inc_t     cs_p       = bli_obj_col_stride( *p );
	inc_t     is_p       = bli_obj_imag_stride( *p );
	dim_t     pd_p       = bli_obj_panel_dim( *p );
	inc_t     ps_p       = bli_obj_panel_stride( *p );

	obj_t     kappa;
	// Initialize the pointer to NULL to stop gcc complaining. 2-9-16 GH
	obj_t*    kappa_p = NULL;
	void*     buf_kappa;

	func_t*   packm_kers;
	void*     packm_ker;

	FUNCPTR_T f;

	// Treatment of kappa (i.e., scaling during packing) depends on
	// whether we are executing an induced method.
	if ( bli_is_ind_packed( schema ) )
	{
		// The value for kappa we use will depend on whether the scalar
		// attached to A has a nonzero imaginary component. If it does,
		// then we will apply the scalar during packing to facilitate
		// implementing induced complex domain algorithms in terms of
		// real domain micro-kernels. (In the aforementioned situation,
		// applying a real scalar is easy, but applying a complex one is
		// harder, so we avoid the need altogether with the code below.)
		if( thread_am_ochief( t ) )
		{
			if ( bli_obj_scalar_has_nonzero_imag( p ) )
			{
				// Detach the scalar.
				bli_obj_scalar_detach( p, &kappa );

				// Reset the attached scalar (to 1.0).
				bli_obj_scalar_reset( p );

				kappa_p = &kappa;
			}
			else
			{
				// If the internal scalar of A has only a real component, then
				// we will apply it later (in the micro-kernel), and so we will
				// use BLIS_ONE to indicate no scaling during packing.
				kappa_p = &BLIS_ONE;
			}
		}
		kappa_p = thread_obroadcast( t, kappa_p );

		// Acquire the buffer to the kappa chosen above.
		buf_kappa = bli_obj_buffer_for_1x1( dt_cp, *kappa_p );
	}
	else // if ( bli_is_nat_packed( schema ) )
	{
		// This branch is for native execution, where we assume that
		// the micro-kernel will always apply the alpha scalar of the
		// higher-level operation. Thus, we use BLIS_ONE for kappa so
		// that the underlying packm implementation does not perform
		// any scaling during packing.
		buf_kappa = bli_obj_buffer_for_const( dt_cp, BLIS_ONE );
	}


	// Choose the correct func_t object based on the pack_t schema.
	if      ( bli_is_4mi_packed( schema ) ) packm_kers = packm_struc_cxk_4mi_kers;
	else if ( bli_is_3mi_packed( schema ) ||
	          bli_is_3ms_packed( schema ) ) packm_kers = packm_struc_cxk_3mis_kers;
	else if ( bli_is_ro_packed( schema ) ||
	          bli_is_io_packed( schema ) ||
	         bli_is_rpi_packed( schema ) )  packm_kers = packm_struc_cxk_rih_kers;
	else                                    packm_kers = packm_struc_cxk_kers;

	// Query the datatype-specific function pointer from the func_t object.
	packm_ker = bli_func_obj_query( dt_cp, packm_kers );


	// Index into the type combination array to extract the correct
	// function pointer.
	f = ftypes[dt_cp];

	// Invoke the function.
	f( strucc,
	   diagoffc,
	   diagc,
	   uploc,
	   transc,
	   schema,
	   invdiag,
	   revifup,
	   reviflo,
	   m_p,
	   n_p,
	   m_max_p,
	   n_max_p,
	   buf_kappa,
	   buf_c, rs_c, cs_c,
	   buf_p, rs_p, cs_p,
	          is_p,
	          pd_p, ps_p,
	   packm_ker,
	   t );
}
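
The kappa treatment above reduces to a small decision, sketched here at scalar granularity with hypothetical variables: fold the attached scalar into the packed operand only when it has a nonzero imaginary part; otherwise pack with 1.0 and defer the scaling to the micro-kernel.

#include <complex.h>
#include <stdio.h>

int main( void )
{
	double complex scalar = 2.0 + 1.0*I;  // scalar attached to the operand
	double complex kappa;

	if ( cimag( scalar ) != 0.0 )
	{
		kappa  = scalar;  // apply the scalar during packing...
		scalar = 1.0;     // ...and reset the attached scalar to 1.0
	}
	else
	{
		kappa = 1.0;      // defer scaling to the micro-kernel
	}

	printf( "kappa = %g%+gi, scalar = %g%+gi\n",
	        creal( kappa ), cimag( kappa ),
	        creal( scalar ), cimag( scalar ) );

	return 0;
}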
Example 7
void bli_gemm_blk_var4f( obj_t*  a,
                         obj_t*  b,
                         obj_t*  c,
                         gemm_t* cntl,
                         gemm_thrinfo_t* thread )
{
	extern packm_t* gemm3mh_packa_cntl_ro;
	extern packm_t* gemm3mh_packa_cntl_io;
	extern packm_t* gemm3mh_packa_cntl_rpi;

	packm_t* packa_cntl_ro  = gemm3mh_packa_cntl_ro;
	packm_t* packa_cntl_io  = gemm3mh_packa_cntl_io;
	packm_t* packa_cntl_rpi = gemm3mh_packa_cntl_rpi;

    // The "_s" suffix indicates the object lives on the stack.
    obj_t b_pack_s;
    obj_t a1_pack_s, c1_pack_s;

    obj_t a1, c1;
    obj_t* a1_pack  = NULL;
    obj_t* b_pack   = NULL;
    obj_t* c1_pack  = NULL;

	dim_t i;
	dim_t b_alg;
	dim_t m_trans;

    if( thread_am_ochief( thread ) ) {
	    // Initialize object for packing B.
	    bli_obj_init_pack( &b_pack_s );
	    bli_packm_init( b, &b_pack_s,
	                    cntl_sub_packm_b( cntl ) );

        // Scale C by beta (if instructed).
        // Since scalm doesn't support multithreading yet, this must be
        // done by the chief thread.
        bli_scalm_int( &BLIS_ONE,
                       c,
                       cntl_sub_scalm( cntl ) );
    }
    b_pack = thread_obroadcast( thread, &b_pack_s );

	// Initialize objects passed into bli_packm_init for A and C
    if( thread_am_ichief( thread ) ) {
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &c1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    c1_pack = thread_ibroadcast( thread, &c1_pack_s );

	// Pack B (if instructed).
	bli_packm_int( b, b_pack,
	               cntl_sub_packm_b( cntl ),
                   gemm_thread_sub_opackm( thread ) );

	// Query dimension in partitioning direction.
	m_trans = bli_obj_length_after_trans( *a );
    dim_t start, end;
    bli_get_range_t2b( thread, 0, m_trans,
                       bli_blksz_get_mult_for_obj( a, cntl_blocksize( cntl ) ),
                       &start, &end );

	// Partition along the m dimension.
	for ( i = start; i < end; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		// NOTE: Use of a (for execution datatype) is intentional!
		// This causes the right blocksize to be used if c and a are
		// complex and b is real.
		b_alg = bli_determine_blocksize_f( i, end, a,
		                                   cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and C1.
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, c, &c1 );

        // Initialize objects for packing A1 and C1.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            packa_cntl_ro );
            bli_packm_init( &c1, c1_pack,
                            cntl_sub_packm_c( cntl ) );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               packa_cntl_ro,
                       gemm_thread_sub_ipackm( thread ) );

		// Pack C1 (if instructed).
		bli_packm_int( &c1, c1_pack,
		               cntl_sub_packm_c( cntl ),
                       gemm_thread_sub_ipackm( thread ) );

		// Perform gemm subproblem.
		bli_gemm_int( &BLIS_ONE,
		              a1_pack,
		              b_pack,
		              &BLIS_ONE,
		              c1_pack,
		              cntl_sub_gemm( cntl ),
                      gemm_thread_sub_gemm( thread ) );

        thread_ibarrier( thread );

		// Only apply beta within the first of three subproblems.
		if ( thread_am_ichief( thread ) ) bli_obj_scalar_reset( c1_pack );


        // Initialize objects for packing A1 and C1.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            packa_cntl_io );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               packa_cntl_io,
                       gemm_thread_sub_ipackm( thread ) );

		// Perform gemm subproblem.
		bli_gemm_int( &BLIS_ONE,
		              a1_pack,
		              b_pack,
		              &BLIS_ONE,
		              c1_pack,
		              cntl_sub_gemm( cntl ),
                      gemm_thread_sub_gemm( thread ) );

        thread_ibarrier( thread );


        // Initialize objects for packing A1 and C1.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            packa_cntl_rpi );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               packa_cntl_rpi,
                       gemm_thread_sub_ipackm( thread ) );

		// Perform gemm subproblem.
		bli_gemm_int( &BLIS_ONE,
		              a1_pack,
		              b_pack,
		              &BLIS_ONE,
		              c1_pack,
		              cntl_sub_gemm( cntl ),
                      gemm_thread_sub_gemm( thread ) );

        thread_ibarrier( thread );


		// Unpack C1 (if C1 was packed).
        // Currently must be done by 1 thread
        bli_unpackm_int( c1_pack, &c1,
                         cntl_sub_unpackm_c( cntl ),
                         gemm_thread_sub_ipackm( thread ) );
	}

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
    thread_obarrier( thread );
    if( thread_am_ochief( thread ) )
	    bli_packm_release( b_pack, cntl_sub_packm_b( cntl ) );
    if( thread_am_ichief( thread ) ){
		// It doesn't matter which packm cntl node we pass in; as long as
		// it is valid, packm_release() will release the mem_t entry.
        bli_packm_release( a1_pack, packa_cntl_ro );
        bli_packm_release( c1_pack, cntl_sub_packm_c( cntl ) );
    }
}
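
The three passes above (ro, io, and rpi packings of A feeding the same c1_pack) follow the shape of the 3m decomposition, in which a complex product is assembled from three real products. A scalar sketch of that identity, offered under the assumption that gemm3mh is a 3m-family induced method:

#include <stdio.h>

int main( void )
{
	double ar = 1.0, ai = 2.0;  // a = 1 + 2i
	double br = 3.0, bi = 4.0;  // b = 3 + 4i

	// Three real multiplies instead of the usual four.
	double t1 = ar * br;
	double t2 = ai * bi;
	double t3 = ( ar + ai ) * ( br + bi );

	double cr = t1 - t2;       // real part
	double ci = t3 - t1 - t2;  // imaginary part

	printf( "a*b = %g + %gi\n", cr, ci );  // expect -5 + 10i

	return 0;
}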
Example 8
void bli_trsm_blk_var3
     (
       obj_t*  a,
       obj_t*  b,
       obj_t*  c,
       cntx_t* cntx,
       cntl_t* cntl,
       thrinfo_t* thread
     )
{
	obj_t a1, b1;

	dir_t direct;

	dim_t i;
	dim_t b_alg;
	dim_t k_trans;

	// Determine the direction in which to partition (forwards or backwards).
	direct = bli_l3_direct( a, b, c, cntl );

	// Prune any zero region that exists along the partitioning dimension.
	bli_l3_prune_unref_mparts_k( a, b, c, cntl );

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( *a );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		b_alg = bli_trsm_determine_kc( direct, i, k_trans, a, b,
		                               bli_cntl_bszid( cntl ), cntx );

		// Acquire partitions for A1 and B1.
		bli_acquire_mpart_ndim( direct, BLIS_SUBPART1,
		                        i, b_alg, a, &a1 );
		bli_acquire_mpart_mdim( direct, BLIS_SUBPART1,
		                        i, b_alg, b, &b1 );

		// Perform trsm subproblem.
		bli_trsm_int
		(
		  &BLIS_ONE,
		  &a1,
		  &b1,
		  &BLIS_ONE,
		  c,
		  cntx,
		  bli_cntl_sub_node( cntl ),
		  bli_thrinfo_sub_node( thread )
		);

		//bli_thread_ibarrier( thread );
		bli_thread_obarrier( bli_thrinfo_sub_node( thread ) );

		// This variant executes multiple rank-k updates. Therefore, if the
		// internal alpha scalars on A/B and C are non-zero, we must ensure
		// that they are only used in the first iteration.
		if ( i == 0 )
		{
			bli_obj_scalar_reset( a ); bli_obj_scalar_reset( b );
			bli_obj_scalar_reset( c );
		}
	}
}
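
Unlike the fixed-direction variants earlier in this section, this one queries bli_l3_direct() and partitions either forward or backward depending on the operands. A minimal sketch of direction-aware blocked partitioning, using a hypothetical direction flag: backward consumes blocks from the tail of the range first.

#include <stdio.h>

int main( void )
{
	int k   = 10;  // dimension to partition
	int b   = 4;   // maximum blocksize
	int fwd = 0;   // hypothetical flag: 1 = forward, 0 = backward

	int b_alg;
	for ( int i = 0; i < k; i += b_alg )
	{
		b_alg = ( k - i < b ? k - i : b );

		// Forward blocks start at i; backward blocks are mirrored so
		// that the tail of the range is visited first.
		int off = fwd ? i : k - i - b_alg;

		printf( "block at offset %d, size %d\n", off, b_alg );
	}

	return 0;
}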
Example 9
void bli_packm_blk_var1_md
     (
       obj_t*   c,
       obj_t*   p,
       cntx_t*  cntx,
       cntl_t*  cntl,
       thrinfo_t* t
     )
{
	num_t     dt_c       = bli_obj_dt( c );
	num_t     dt_p       = bli_obj_dt( p );

	trans_t   transc     = bli_obj_conjtrans_status( c );
	pack_t    schema     = bli_obj_pack_schema( p );

	dim_t     m_p        = bli_obj_length( p );
	dim_t     n_p        = bli_obj_width( p );
	dim_t     m_max_p    = bli_obj_padded_length( p );
	dim_t     n_max_p    = bli_obj_padded_width( p );

	void*     buf_c      = bli_obj_buffer_at_off( c );
	inc_t     rs_c       = bli_obj_row_stride( c );
	inc_t     cs_c       = bli_obj_col_stride( c );

	void*     buf_p      = bli_obj_buffer_at_off( p );
	inc_t     rs_p       = bli_obj_row_stride( p );
	inc_t     cs_p       = bli_obj_col_stride( p );
	inc_t     is_p       = bli_obj_imag_stride( p );
	dim_t     pd_p       = bli_obj_panel_dim( p );
	inc_t     ps_p       = bli_obj_panel_stride( p );

	obj_t     kappa;
	void*     buf_kappa;

	FUNCPTR_T f;


	// Treatment of kappa (i.e., scaling during packing) depends on
	// whether we are executing an induced method.
	if ( bli_is_nat_packed( schema ) )
	{
		// This branch is for native execution, where we assume that
		// the micro-kernel will always apply the alpha scalar of the
		// higher-level operation. Thus, we use BLIS_ONE for kappa so
		// that the underlying packm implementation does not perform
		// any scaling during packing.
		buf_kappa = bli_obj_buffer_for_const( dt_p, &BLIS_ONE );
	}
	else // if ( bli_is_ind_packed( schema ) )
	{
		obj_t* kappa_p;

		// The value for kappa we use will depend on whether the scalar
		// attached to A has a nonzero imaginary component. If it does,
		// then we will apply the scalar during packing to facilitate
		// implementing induced complex domain algorithms in terms of
		// real domain micro-kernels. (In the aforementioned situation,
		// applying a real scalar is easy, but applying a complex one is
		// harder, so we avoid the need altogether with the code below.)
		if ( bli_obj_scalar_has_nonzero_imag( p ) )
		{
			// Detach the scalar.
			bli_obj_scalar_detach( p, &kappa );

			// Reset the attached scalar (to 1.0).
			bli_obj_scalar_reset( p );

			kappa_p = &kappa;
		}
		else
		{
			// If the internal scalar of A has only a real component, then
			// we will apply it later (in the micro-kernel), and so we will
			// use BLIS_ONE to indicate no scaling during packing.
			kappa_p = &BLIS_ONE;
		}

		// Acquire the buffer to the kappa chosen above.
		buf_kappa = bli_obj_buffer_for_1x1( dt_p, kappa_p );
	}


	// Index into the type combination array to extract the correct
	// function pointer.
	f = ftypes[dt_c][dt_p];

	// Invoke the function.
	f(
	   transc,
	   schema,
	   m_p,
	   n_p,
	   m_max_p,
	   n_max_p,
	   buf_kappa,
	   buf_c, rs_c, cs_c,
	   buf_p, rs_p, cs_p,
	          is_p,
	          pd_p, ps_p,
	   cntx,
	   t );
}
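
The mixed-datatype twist in this variant is the two-dimensional dispatch, ftypes[dt_c][dt_p], indexed by the source and destination datatypes. A self-contained sketch of the same table-driven dispatch, with hypothetical datatypes and kernels:

#include <stdio.h>

typedef void (*pack_fn)( void );

void pack_ss( void ) { puts( "pack float  -> float"  ); }
void pack_sd( void ) { puts( "pack float  -> double" ); }
void pack_ds( void ) { puts( "pack double -> float"  ); }
void pack_dd( void ) { puts( "pack double -> double" ); }

enum { DT_S = 0, DT_D = 1, DT_NUM = 2 };

// ftypes[source][destination], mirroring the f = ftypes[dt_c][dt_p]
// lookup in the variant above.
static pack_fn ftypes[DT_NUM][DT_NUM] =
{
	{ pack_ss, pack_sd },
	{ pack_ds, pack_dd },
};

int main( void )
{
	int dt_c = DT_D;  // datatype of the source operand
	int dt_p = DT_S;  // datatype of the packed destination

	ftypes[dt_c][dt_p]();  // prints "pack double -> float"

	return 0;
}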
Example 10
void bli_gemm_blk_var3f( obj_t*  a,
                         obj_t*  b,
                         obj_t*  c,
                         gemm_t* cntl,
                         gemm_thrinfo_t* thread )
{
    obj_t  c_pack_s;
    obj_t  a1_pack_s, b1_pack_s;

    obj_t  a1, b1;
    obj_t* a1_pack = NULL;
    obj_t* b1_pack = NULL;
    obj_t* c_pack = NULL;

	dim_t  i;
	dim_t  b_alg;
	dim_t  k_trans;

    if( thread_am_ochief( thread ) ){
        // Initialize object for packing C
	    bli_obj_init_pack( &c_pack_s );
        bli_packm_init( c, &c_pack_s,
                        cntl_sub_packm_c( cntl ) );

        // Scale C by beta (if instructed).
        bli_scalm_int( &BLIS_ONE,
                       c,
                       cntl_sub_scalm( cntl ) );
    }
    c_pack = thread_obroadcast( thread, &c_pack_s );

    // Initialize pack objects for A and B that are passed into packm_init().
    if( thread_am_ichief( thread ) ){
        bli_obj_init_pack( &a1_pack_s );
        bli_obj_init_pack( &b1_pack_s );
    }
    a1_pack = thread_ibroadcast( thread, &a1_pack_s );
    b1_pack = thread_ibroadcast( thread, &b1_pack_s );

	// Pack C (if instructed).
	bli_packm_int( c, c_pack,
	               cntl_sub_packm_c( cntl ),
                   gemm_thread_sub_opackm( thread ) );

	// Query dimension in partitioning direction.
	k_trans = bli_obj_width_after_trans( *a );

	// Partition along the k dimension.
	for ( i = 0; i < k_trans; i += b_alg )
	{
		// Determine the current algorithmic blocksize.
		// NOTE: We call a gemm/hemm/symm-specific function to determine
		// the kc blocksize so that we can implement the "nudging" of kc
		// to be a multiple of mr or nr, as needed.
		b_alg = bli_gemm_determine_kc_f( i, k_trans, a, b,
		                                 cntl_blocksize( cntl ) );

		// Acquire partitions for A1 and B1.
		bli_acquire_mpart_l2r( BLIS_SUBPART1,
		                       i, b_alg, a, &a1 );
		bli_acquire_mpart_t2b( BLIS_SUBPART1,
		                       i, b_alg, b, &b1 );

		// Initialize objects for packing A1 and B1.
        if( thread_am_ichief( thread ) ) {
            bli_packm_init( &a1, a1_pack,
                            cntl_sub_packm_a( cntl ) );
            bli_packm_init( &b1, b1_pack,
                            cntl_sub_packm_b( cntl ) );
        }
        thread_ibarrier( thread );

		// Pack A1 (if instructed).
		bli_packm_int( &a1, a1_pack,
		               cntl_sub_packm_a( cntl ),
                       gemm_thread_sub_ipackm( thread ) );

		// Pack B1 (if instructed).
		bli_packm_int( &b1, b1_pack,
		               cntl_sub_packm_b( cntl ),
                       gemm_thread_sub_ipackm( thread ) );

		// Perform gemm subproblem.
		bli_gemm_int( &BLIS_ONE,
		              a1_pack,
		              b1_pack,
		              &BLIS_ONE,
		              c_pack,
		              cntl_sub_gemm( cntl ),
                      gemm_thread_sub_gemm( thread) );

		// This variant executes multiple rank-k updates. Therefore, if the
		// internal beta scalar on matrix C is non-zero, we must use it
		// only for the first iteration (and then BLIS_ONE for all others).
		// And since c_pack is a local obj_t, we can simply overwrite the
		// internal beta scalar with BLIS_ONE once it has been used in the
		// first iteration.
        thread_ibarrier( thread );
		if ( i == 0 && thread_am_ichief( thread ) ) bli_obj_scalar_reset( c_pack );

	}

    thread_obarrier( thread );

	// Unpack C (if C was packed).
    bli_unpackm_int( c_pack, c,
                     cntl_sub_unpackm_c( cntl ),
                     gemm_thread_sub_opackm( thread ) );

	// If any packing buffers were acquired within packm, release them back
	// to the memory manager.
    if( thread_am_ochief( thread ) )
        bli_packm_release( c_pack, cntl_sub_packm_c( cntl ) );
    if( thread_am_ichief( thread ) ){
        bli_packm_release( a1_pack, cntl_sub_packm_a( cntl ) );
        bli_packm_release( b1_pack, cntl_sub_packm_b( cntl ) );
    }
}