Example #1
File: test.c Project: 8l/mxp
/** Compares two vectors, element by element, to see if one is the reverse of the other.
 *
 *  @param[in] V1  Vector *in scratch* or *in memory*; traversed backward from the end.
 *  @param[in] V2  Vector *in scratch* or *in memory*; traversed forward from the start.
 *  @param[in] N   Number of elements in each of the vectors.
 **/
void VBX_T(verify_vector)( void *V1, void *V2, unsigned int N )
{
	vbx_mm_t *v1 = (vbx_mm_t *)(V1);
	vbx_mm_t *v2 = (vbx_mm_t *)(V2);
	unsigned int i, num_error=0;
	vbx_sync();
	if( !v1 || !v2 ) return;
	for( i=0; i<N; i++ ) {
		if( v1[N-1-i] != v2[i] ) {
			if( ++num_error < 20000 ) {
				#if   ( VBX_TEMPLATE_T == WORDSIZE_DEF | VBX_TEMPLATE_T == UWORDSIZE_DEF )
					printf( "ERROR at %d/%d, v1=%"PRId32", v2=%"PRId32"\n", i, N, v1[N-1-i], v2[i] );
				#else
					printf( "ERROR at %d/%d, v1=%d, v2=%d,\n", i, N, v1[N-1-i], v2[i] );
				#endif
			}
		}
		else if( num_error ) {
			#if   ( VBX_TEMPLATE_T == WORDSIZE_DEF | VBX_TEMPLATE_T == UWORDSIZE_DEF )
				printf( "noerr at %d/%d, v1=%"PRId32", v2=%"PRId32"\n", i, N, v1[N-1-i], v2[i] );
			#else
				printf( "noerr at %d/%d, v1=%d, v2=%d\n", i, N, v1[N-1-i], v2[i] );
			#endif
		}
	}
	if( num_error ) { VBX_TEST_FAIL(num_error); exit(-1); }
	vbx_sync();
}
Example #2
int dma_bandwidth_test()
{
	const int num_iter = 64;

	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	int scratchpad_size = this_mxp->scratchpad_size;

	uint8_t *buf = vbx_shared_malloc(scratchpad_size);
	vbx_ubyte_t *v_buf = vbx_sp_malloc(scratchpad_size);

	vbx_timestamp_t time_start, time_stop;

	int i;
	int len;
	int to_host;
	int errors = 0;

	vbx_mxp_print_params();

	// dma_alignment_bytes gives DMA master data bus width in bytes.
	double bytes_per_sec = \
		(((double) this_mxp->core_freq) * this_mxp->dma_alignment_bytes);
	double max_megabytes_per_sec = bytes_per_sec/(1024*1024);
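	// e.g. (hypothetical): a 100 MHz core with a 16-byte DMA master bus gives
	// 1.6e9 bytes/s, i.e. about 1526 Megabytes/s by the 1024*1024 definition used here.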
	printf("\nMax available bandwidth = %s Megabytes/s\n",
	       vbx_eng(max_megabytes_per_sec, 4));

	printf("\n");

	for (to_host = 0; to_host < 2; to_host++) {
		for (len = 32; len <= scratchpad_size ; len *= 2) {
			printf("DMA %s, %d bytes\n", to_host ? "write" : "read", len);
			vbx_timestamp_start();
			if (to_host) {
				time_start = vbx_timestamp();
				for (i = 0; i < num_iter; i++) {
					vbx_dma_to_host(buf, v_buf, len);
				}
				vbx_sync();
				time_stop = vbx_timestamp();
			} else {
				time_start = vbx_timestamp();
				for (i = 0; i < num_iter; i++) {
					vbx_dma_to_vector(v_buf, buf, len);
				}
				vbx_sync();
				time_stop = vbx_timestamp();
			}
			print_dma_bandwidth(time_start, time_stop, len, num_iter,
			                    max_megabytes_per_sec);
			printf("\n");
		}
		printf("\n");
	}

	vbx_shared_free(buf);
	vbx_sp_free();

	return errors;
}
Example #3
void vbx_mtx_fdct_free( vbx_mtx_fdct_t *v )
{
	vbx_shared_free( v );
	vbx_sp_pop();
	//vbx_sync();  // don't wait for result to be written; let it run in the background
	vbx_sync();  // wait for all results?
}
Example #4
int compare_vbx_lbp_ci_to_scalar_patterns(unsigned short* img, int width, int height, int max_print_errors)
{
    int j, errors = 0;
    unsigned char** scalar_patterns = test_scalar_patterns(img, 0, width, height);

    vbx_ubyte_t* v_in = (vbx_ubyte_t*)vbx_sp_malloc(3*width*sizeof(vbx_word_t));
    vbx_ubyte_t* v_top = (vbx_ubyte_t*)vbx_sp_malloc(width*sizeof(vbx_ubyte_t));
    vbx_ubyte_t* v_bot = (vbx_ubyte_t*)vbx_sp_malloc(width*sizeof(vbx_ubyte_t));
    vbx_ubyte_t* v_lbp = v_bot;

    unsigned char* lbp = (unsigned char*)vbx_shared_malloc(width*sizeof(unsigned char));

    vbx_set_vl(width);
    for(j=0; j < height - 2; j++){
        vbx_dma_to_vector(v_in, img+j*width, 3*width*sizeof(unsigned char));
        vbx(VVHU, VCUSTOM1, v_top, v_in, v_in+width); 
        vbx(VVHU, VCUSTOM1, v_bot, v_in+width, v_in+2*width); 
        vbx(SVHBU, VAND, v_top, 0xf0, v_top);
        vbx(SVHBU, VAND, v_bot, 0x0f, v_bot);
        vbx(VVBU, VADD, v_lbp, v_bot, v_top); 
        vbx_dma_to_host(lbp, v_lbp, width*sizeof(unsigned char));
        vbx_sync();

        errors += match_array_byte(lbp, scalar_patterns[0]+j*width, "custom_lbp", width-2, 1, max_print_errors, 1, j);

    }
    vbx_sp_free();
    vbx_shared_free(lbp);
    return errors;
}
Example #5
	int sobel_argb32_3x3(vbx_uword_t *sobel_out, vbx_uword_t *argb_in, const short image_width,
	                             const short image_height, const short image_pitch, const short renorm)
	{
		size_t free_sp=vbx_sp_getfree();
		size_t vectors_needed=8;
		short partial_width=free_sp/(vectors_needed*sizeof(vbx_uword_t));
		if(partial_width>image_width){
			sobel_argb32_3x3_partial(sobel_out, argb_in, image_width, image_height, image_pitch,renorm);
		}else{
			//can't do an entire row at a time, so do partial_width at a time
			size_t partial_step=partial_width-2;
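			//(step by partial_width-2 so consecutive tiles overlap by two input
			// columns, giving the 3x3 window both horizontal neighbours at tile edges)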
			for(int i=0;;i+=partial_step){
				//account for last tile being smaller
				if(i+partial_width > image_width){
					partial_width=image_width-i;
				}

				sobel_argb32_3x3_partial(sobel_out+i, argb_in+i, partial_width, image_height, image_pitch,renorm);

				if(i+partial_width == image_width){
					//that was the last tile, so break
					//(this check can't easily be folded into the for statement's condition)
					break;
				}
			}
		}
		VBX::Vector<vbx_uword_t> side(1);
		side=0;
		side.to2D(1,image_height,0).dma_write(sobel_out,image_pitch);//write to first pixel
		side.to2D(1,image_height,0).dma_write(sobel_out+image_width-1,image_pitch);//write to last pixel

		vbx_sync();
		return 0;
	}
Example #6
double test_vector_sp(vbx_mm_t *vector_out, vbx_mm_t  *vector_in1, int IN1ROWS, int IN1COLS, vbx_mm_t  *vector_in2, int IN2ROWS, int IN2COLS, double scalar_time )
{
	typedef vbx_mm_t vbx_sp_t;
	int retval=-1;
	vbx_timestamp_t time_start, time_stop;
	printf( "\nExecuting MXP matrix multiply... src1[%dx%d] src2[%dx%d]\n",IN1ROWS, IN1COLS,IN2ROWS, IN2COLS );

	vbx_timestamp_start();
	time_start = vbx_timestamp();
	vbx_sp_push();
	vbx_sp_t* v_in1=(vbx_sp_t*)vbx_sp_malloc(sizeof(vbx_sp_t)*IN1ROWS*IN1COLS);
	vbx_sp_t* v_in2=(vbx_sp_t*)vbx_sp_malloc(sizeof(vbx_sp_t)*IN2ROWS*IN2COLS);
	vbx_sp_t* v_out=(vbx_sp_t*)vbx_sp_malloc(sizeof(vbx_sp_t)*IN1ROWS*IN2COLS);
	if(v_out!=NULL){
		vbx_dma_to_vector(v_in1,vector_in1,sizeof(vbx_sp_t)*IN1ROWS*IN1COLS);
		vbx_dma_to_vector(v_in2,vector_in2,sizeof(vbx_sp_t)*IN2ROWS*IN2COLS);
		retval = vbw_mtx_mul( v_out, v_in1, IN1ROWS, IN1COLS, v_in2, IN2ROWS, IN2COLS );
		vbx_dma_to_host(vector_out,v_out,sizeof(vbx_sp_t)*IN1ROWS*IN2COLS);
		vbx_sync();
	}else{
		printf("not enough sp space for sp test");
	}
	time_stop = vbx_timestamp();
	printf( "...done. retval:0x%08X\n", retval );
	return vbx_print_vector_time( time_start, time_stop, scalar_time );
}
Example #7
int main(void)
{
	vbx_test_init();

	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	const int VBX_SCRATCHPAD_SIZE = this_mxp->scratchpad_size;
	const int required_vectors = 4;

	int N = VBX_SCRATCHPAD_SIZE / sizeof(vbx_mm_t) / required_vectors;
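	// e.g. (hypothetical): a 64 KiB scratchpad, 4-byte vbx_mm_t, and 4 required
	// vectors gives N = 65536/4/4 = 4096 elements per vector.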

	int PRINT_LENGTH = min( N, MAX_PRINT_LENGTH );

	double scalar_time, vector_time;
	int errors=0;

	vbx_mxp_print_params();
	printf( "\nAdd test...\n" );
	printf( "Vector length: %d\n", N );

	vbx_mm_t *scalar_in1 = malloc( N*sizeof(vbx_mm_t) );
	vbx_mm_t *scalar_in2 = malloc( N*sizeof(vbx_mm_t) );
	vbx_mm_t *scalar_out = malloc( N*sizeof(vbx_mm_t) );

	vbx_mm_t *vector_in1 = vbx_shared_malloc( N*sizeof(vbx_mm_t) );
	vbx_mm_t *vector_in2 = vbx_shared_malloc( N*sizeof(vbx_mm_t) );
	vbx_mm_t *vector_out = vbx_shared_malloc( N*sizeof(vbx_mm_t) );
//	vbx_mm_t *vector_out = vector_in2 - 5;


	vbx_sp_t *v_in1 = vbx_sp_malloc( N*sizeof(vbx_sp_t) );
	vbx_sp_t *v_in2 = vbx_sp_malloc( N*sizeof(vbx_sp_t) );
	vbx_sp_t *v_out = vbx_sp_malloc( N*sizeof(vbx_sp_t) );
//	vbx_sp_t *v_out = v_in2-5;

	VBX_T(test_zero_array)( scalar_out, N );
	VBX_T(test_zero_array)( vector_out, N );

	VBX_T(test_init_array)( scalar_in1, N, 1 );
	VBX_T(test_copy_array)( vector_in1, scalar_in1, N );
	VBX_T(test_init_array)( scalar_in2, N, 1 );
	VBX_T(test_copy_array)( vector_in2, scalar_in2, N );

	VBX_T(test_print_array)( scalar_in1, PRINT_LENGTH );
	VBX_T(test_print_array)( scalar_in2, PRINT_LENGTH );

	scalar_time = test_scalar( scalar_out, scalar_in1, scalar_in2, N );
	VBX_T(test_print_array)( scalar_out, PRINT_LENGTH);

	vbx_dma_to_vector( v_in1, (void *)vector_in1, N*sizeof(vbx_sp_t) );
	vbx_dma_to_vector( v_in2, (void *)vector_in2, N*sizeof(vbx_sp_t) );
	vector_time = test_vector( v_out, v_in1, v_in2, N, scalar_time );
	vbx_dma_to_host( (void *)vector_out, v_out, N*sizeof(vbx_sp_t) );
	vbx_sync();
	VBX_T(test_print_array)( vector_out, PRINT_LENGTH );

	errors += VBX_T(test_verify_array)( scalar_out, vector_out, N );

	VBX_TEST_END(errors);
	return 0;
}
Example #8
int main(void)
{
	vbx_test_init();
	vbx_mxp_print_params();
	int errors=0;
	unsigned instr_cycles,instr_count, dma_cycles,dma_count;
	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	int lanes= this_mxp->vector_lanes;
	int dma_width=this_mxp->dma_alignment_bytes /4;
	debug(lanes);
	debug(dma_width);
	vbx_set_vl(-1);
	VBX_COUNTER_RESET();
	vbx(SVW,VMOV,0,0,0);
	vbx_sync();
	if(VBX_SIMULATOR)
		printf("simulator\n");
	else
		printf("not simulator\n");
	instr_cycles=VBX_GET_WRITEBACK_CYCLES();
	dma_cycles=VBX_GET_DMA_CYCLES();
	dma_count=VBX_GET_DMAS();
	instr_count=VBX_GET_INSTRUCTIONS();


	debug(instr_cycles);
	debug(dma_cycles);
	debug(dma_count);
	debug(instr_count );

	VBX_TEST_END(errors);
	return 0;
}
Example #9
File: test.c Project: 8l/mxp
/** Prints a vector of N elements, 16 elements per row.
 *
 *  @param[in] str A label to print first.
 *  @param[in] V   Vector to print *in scratch* or *in memory*.
 *  @param[in] N   Number of elements in the vector.
 **/
void VBX_T(print_vector)( char *str, void *V, unsigned int N )
{
	vbx_mm_t *v = (vbx_mm_t *)(V);
	unsigned int i;
	vbx_sync();
	printf( "%s", str );
	for( i=0; i<N; i++ ) {
		if( (i&15) == 0 ) printf("\n");
		#if   ( VBX_TEMPLATE_T == WORDSIZE_DEF | VBX_TEMPLATE_T == UWORDSIZE_DEF )
			printf( "%4"PRId32" ", v[i] );
		#else
			printf( "%d ", v[i] );
		#endif
	}
	printf("\n");
	vbx_sync();
}
Example #10
int compare_vbx_lut_to_vbx_lut_ci(int sz, int max_print_errors)
{
    int f, n, errors = 0;

    vbx_byte_t* v_pass = (vbx_byte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_pattern = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_lutc = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_group = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_sel = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_lut = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_word_t));
    vbx_ubyte_t* v_idx = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_word_t));

    unsigned char* lut = (unsigned char*)vbx_shared_malloc(sz*sizeof(unsigned char));
    unsigned char* lut_c = (unsigned char*)vbx_shared_malloc(sz*sizeof(unsigned char));

    for (n = 0; n < sz; n++) {
        v_pattern[n] = n & 0xff;
    }

    int s, stage = 11;
    for (f = 0; f < face_lbp[stage].count; f++) {
        lbp_feat_t feat = face_lbp[stage].feats[f];

        vbx_set_vl(sz);
        int total = f;
        s = 0;
        while(s < stage){
            total += face_lbp[s].count;
            s++;
        }
        vbx(SVBU, VCUSTOM0, v_lutc, total, v_pattern);

        vbx(SVB, VMOV, v_pass, feat.fail, 0);
        /* check if pattern is in lut */
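        /* (pattern>>5 picks which of the 8 32-bit lut words holds this pattern's
           bit and pattern&0x1f picks the bit within that word; the shift-and-AND
           below then tests membership.) */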
        vbx(SVBU, VSHR, v_group, 5, v_pattern);
        for (n = 0; n < 8; n++) {
            vbx(SVB, VADD, v_sel, -n, v_group);
            vbx(SVBW, VCMV_Z, v_lut, feat.lut[n], v_sel);
        }

        vbx(SVBWU, VAND, v_idx, 0x1f, v_pattern);
        vbx(VVWB, VSHR, v_lut, v_idx, v_lut);
        vbx(SVB, VAND, v_lut, 1, v_lut);
        vbx(SVB, VCMV_LEZ, v_pass, feat.pass, v_lut);

        vbx_dma_to_host(lut_c, v_lutc, sz*sizeof(unsigned char));
        vbx_dma_to_host(lut, v_pass, sz*sizeof(unsigned char));
        vbx_sync();

        errors += match_array_byte(lut_c, lut, "custom_lut", sz, 1, max_print_errors, 0, 0);

    }
    vbx_sp_free();
    vbx_shared_free(lut);
    vbx_shared_free(lut_c);
    return errors;
}
Example #11
File: test.c Project: 8l/mxp
int deep_vector_copy_test()
{
	int retval;
	int num_test;
	int total_errors = 0;
	const int NUM_TESTS = TEST_DEEP_SP_NUM_TESTS;
	const int NB = vbx_sp_getfree();

	int NT = NB / sizeof(vbx_sp_t);

	vbx_sp_push();
	vbx_sp_t *v = vbx_sp_malloc( NB );

	srand( 0x1a84c92a );

	for( num_test=0; num_test < NUM_TESTS ; num_test++ ) {

		// initialize entire available scratchpad
		vbx_set_vl( NT );
		vbx( SE(T), VAND, v, MSK, 0 );

		// choose random src/dest/length:
		// -- randomly pick the dest
		// -- set a window size of 2*K around the dest
		// -- randomly pick the src within the window
		// -- randomly pick the length, subject to end-of-scratchpad
		// -- this 'window' rule increases probability of overlaps
		// -- rough distribution: 30% short (pipeline) overlaps, 20% long overlaps, 50% no overlap
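		// -- e.g. (hypothetical numbers): with NT=10000 and N1=4000, K is drawn from
		//    [1,1024]; say K=100, so N2 lands in [3900,4099] and NN stays strictly
		//    below NT-max(N1,N2), keeping both regions inside the scratchpad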

		int K, N1, N2, NN;
		N1 = rand() % NT;
		K  = 1 + rand() % ((N1 > 0)? min(min(N1, NT-N1), 1024): min(NT, 1024));
		N2 = N1 - K + rand() % (2*K);
		NN = rand() % (NT - max(N1,N2));
		vbx_sp_t *dst = v + N1;
		vbx_sp_t *src = v + N2;

		printf("test:%d src:0x%08x dst:0x%08x len:%08d", num_test, N1, N2, NN );

		// do the copy
		retval = VBX_T(vbw_vec_copy)( dst, src, NN );
		vbx_sync();
		printf(" retval:0x%04x\n",retval);

		// ensure the copy was done properly
		int errors = verify_copy((vbx_mm_t *)v,     0,    N1,       0, "head")
		           + verify_copy((vbx_mm_t *)v,    N1, NN+N1, (N2-N1), "copy")
		           + verify_copy((vbx_mm_t *)v, NN+N1,    NT,       0, "tail");
		total_errors += errors;
		if( errors ) {
			//break;
		}
	}

	vbx_sp_pop();
	return total_errors;
}
Example #12
File: test.c Project: 8l/mxp
int deep_vector_copy_ext_test()
{
	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	int retval;
	int num_test;
	int total_errors = 0;
	const int NUM_TESTS = TEST_DEEP_MM_NUM_TESTS;
	int NB = this_mxp->scratchpad_size * 10;
	int NT = NB / sizeof(vbx_mm_t);

	vbx_mm_t *v = vbx_shared_malloc( NB );

	srand( 0x1a84c92a );

	int i;

	for( num_test=0; num_test < NUM_TESTS ; num_test++ ) {

		//	initialize the whole working space
		for( i=0; i<NT; i++ ) {
			v[i] = i & MSK;
		}

		// choose random src/dest/length:
		// -- randomly pick the dest
		// -- set a window size of 2*K around the dest
		// -- randomly pick the src within the window
		// -- randomly pick the length, subject to end-of-scratchpad
		// -- this 'window' rule increases probability of overlaps
		// -- rough distribution: 30% short (pipeline) overlaps, 20% long overlaps, 50% no overlap

		int K, N1, N2, NN;
		N1 = rand() % NT;
		K  = 1 + rand() % ((N1 > 0)? min(min(N1, NT-N1), 1024): min(NT, 1024));
		N2 = N1 - K + rand() % (2*K);
		NN = rand() % (NT - max(N1,N2));
		vbx_mm_t *dst = v + N1;
		vbx_mm_t *src = v + N2;
		printf("test:%d src:0x%08x dst:0x%08x len:%08d", num_test, N1, N2, NN );

		// do the copy
		retval = VBX_T(vbw_vec_copy_ext)( dst, src, NN );
		vbx_sync();
		printf(" retval:0x%04x\n",retval);

		// ensure the copy was done properly
		int errors = verify_copy(v,     0,    N1,       0, "head")
		           + verify_copy(v,    N1, NN+N1, (N2-N1), "copy")
		           + verify_copy(v, NN+N1,    NT,       0, "tail");
		total_errors += errors;
		if( errors ) {
			//break;
		}
	}

	return total_errors;
}
Example #13
File: test.c Project: 8l/mxp
int compare_scalar_rgb2luma_to_vbw_rgb2luma16(unsigned short *img, unsigned short *vbx_img, pixel *vbx_input, int width, int height, int stride, int max_print_errors)
{
    int errors;
    scalar_rgb2luma(img, vbx_input, width, height, stride);
    vbw_rgb2luma16(vbx_img, vbx_input, width, height, stride);
    vbx_sync();

    errors = match_array_half(img, vbx_img, "greyscale", width, height, 0, max_print_errors, 0);
    return errors;
}
Example #14
	inline int vec_fir_tiler(vbx_mm_t *output, vbx_mm_t *input, vbx_mm_t *coeffs,  int sample_size,  int num_taps)
	{
		typedef vbx_mm_t vbx_sp_t;

		//use 1/8 of scratchpad, only really need 1/4, but lets be safe
		int chunk_size = vbx_sp_getfree()>>3;
		//divide by sizeof vbx_sp_t
		chunk_size >>= (sizeof(vbx_sp_t)==sizeof(vbx_word_t)?2:
		                sizeof(vbx_sp_t)==sizeof(vbx_half_t)?1:0);
		// Note: chunksize is the size of the input chunk, so the output
		// chunk is chunk_size - num_taps.
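		// e.g. (hypothetical sizes): with 64 KiB of scratchpad free and
		// vbx_sp_t == vbx_half_t, chunk_size = (65536>>3)>>1 = 4096 elements,
		// of which chunk_size-num_taps become output samples per chunk.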
		if( chunk_size==0 ){
			return VBW_ERROR_SP_ALLOC_FAILED;
		}
		VBX::Vector<vbx_sp_t> v_coeffs(num_taps);
		v_coeffs.dma_read(coeffs);
		VBX::Prefetcher<vbx_sp_t> input_dbl_buf(1,chunk_size+num_taps,input,input+sample_size,chunk_size);


		input_dbl_buf.fetch();
		//if the entire sample fits in the scratchpad, do that.
		if(chunk_size>sample_size-num_taps){
			//do in sp fir filter
			VBX::Vector<vbx_sp_t>& v_in=input_dbl_buf[0];
			VBX::Vector<vbx_sp_t> v_out(sample_size-num_taps);
			vec_fir(v_out,v_in,v_coeffs);
			v_out.dma_write(output);
			vbx_sync();
			return VBW_SUCCESS;
		}
		VBX::Vector<vbx_sp_t> v_out(chunk_size);

		int num_chunks=(sample_size + chunk_size/2)/chunk_size;
		for(int chunk=0;chunk<num_chunks;chunk++){
			input_dbl_buf.fetch();
			VBX::Vector<vbx_sp_t>& v_in=input_dbl_buf[0];
			vec_fir(v_out,v_in,v_coeffs);
			v_out[0 upto v_in.size-num_taps].dma_write(output+chunk*chunk_size);
		}
		vbx_sync();
		return VBW_SUCCESS;
	}
Example #15
File: test.c Project: 8l/mxp
double test_vector_power( vbx_word_t *vector_out, vbx_word_t *vector_in1, vbx_word_t *vector_in2, int N, double scalar_time )
{
	int retval;
	vbx_timestamp_t time_start, time_stop;
	printf("\nExecuting MXP vector software power...");
 	vbx_word_t *v_out = vbx_sp_malloc( N*sizeof(vbx_word_t) );
	vbx_word_t *v_in1 = vbx_sp_malloc( N*sizeof(vbx_word_t) );
	vbx_word_t *v_in2 = vbx_sp_malloc( N*sizeof(vbx_word_t) );
	vbx_dma_to_vector( v_in1, vector_in1, N*sizeof(vbx_word_t) );
	vbx_dma_to_vector( v_in2, vector_in2, N*sizeof(vbx_word_t) );
	vbx_timestamp_start();

	time_start = vbx_timestamp();
	retval = vbw_vec_power_word( v_out, v_in1, v_in2, N );
	vbx_sync();
	time_stop = vbx_timestamp();
	vbx_dma_to_host( vector_out, v_out, N*sizeof(vbx_word_t) );
	vbx_sync();

	printf("done. retval:%X\n",retval);
	return vbx_print_vector_time(time_start, time_stop, scalar_time);
}
Example #16
static int isAbsOutOfRangeV( vptr_half v_src_r, vptr_half v_src_i, vptr_half v_temp, int n )
{

	//used for inverse only
	vbx_set_vl(n);
	vbx(SVH, VABSDIFF, v_temp, 0, v_src_r );    // get abs value of real
	vbx(SVH, VSUB, v_temp, 16383, v_temp );     // if (16383 - v_src) < 0, needs scaling
	vbx_acc(SVH, VCMV_LTZ, v_temp, 1, v_temp ); // accum # of neg values to see if scaling required
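	// (any |real| > 16383 makes the subtraction above negative, so the accumulating
	//  conditional move leaves a nonzero count in v_temp[0], checked after the sync)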
	vbx_sync();
	if( v_temp[0] ){
		return 1;
	}

	vbx(SVH, VABSDIFF, v_temp, 0, v_src_i );    // get abs value of imag
	vbx(SVH, VSUB, v_temp, 16383, v_temp );     // if (16383 - v_src) < 0, needs scaling
	vbx_acc(SVH, VCMV_LTZ, v_temp, 1, v_temp ); // accum # of neg values to see if scaling required
	vbx_sync();
	if( v_temp[0] ){
		return 1;
	}

	return 0;
}
Example #17
File: test.c Project: 8l/mxp
double test_vector_ext( vbx_mm_t *out, vbx_mm_t *in, int N, double scalar_time )
{
	int retval;
	vbx_timestamp_t time_start, time_stop;
	printf( "\nExecuting vector copy (ext)...\n" );

	vbx_timestamp_start();
	time_start = vbx_timestamp();
	retval = VBX_T(vbw_vec_copy_ext)( out, in, N );
	vbx_sync();
	time_stop = vbx_timestamp();

	printf( "...done. retval: %X\n", retval );
	return vbx_print_vector_time(time_start, time_stop, scalar_time);
}
Example #18
double test_vector( vbx_sp_t *v_out, vbx_sp_t *v_in1, vbx_sp_t *v_in2, int N, double scalar_time )
{
	int retval;

	vbx_timestamp_t time_start, time_stop;
	printf( "\nExecuting MXP vector add...\n" );

	vbx_timestamp_start();
	time_start = vbx_timestamp();
	retval = VBX_T(vbw_vec_add)( v_out, v_in1, v_in2, N );
	vbx_sync();
	time_stop = vbx_timestamp();

	printf( "...done. retval: %X\n", retval );
	return vbx_print_vector_time(time_start, time_stop, scalar_time );
}
Example #19
int vbw_sobel_argb32_3x3(unsigned *output, unsigned *input, const short image_width, const short image_height, const short image_pitch, const short renorm)
{
	size_t free_sp=vbx_sp_getfree();
	size_t vectors_needed=8;
	size_t partial_width=free_sp/(vectors_needed*sizeof(vbx_uword_t));
	if(partial_width>image_width){
		vbw_sobel_argb32_3x3_partial(output, input, image_width, image_height, image_pitch,renorm);
	}else{
		//can't do an entire row at a time, so do partial_width at a time
		size_t partial_step=partial_width-2;
		int i;
		for(i=0;;i+=partial_step){
			//account for last tile being smaller
			if(i+partial_width > image_width){
				partial_width=image_width-i;
			}

			vbw_sobel_argb32_3x3_partial(output+i, input+i, partial_width, image_height, image_pitch,renorm);

			if(i+partial_width == image_width){
				//that was the last tile, so break
				//(this check can't easily be folded into the for statement's condition)
				break;
			}
		}
	}
	vbx_sp_push();
	vbx_word_t* side=vbx_sp_malloc(sizeof(vbx_word_t));
	vbx_set_vl(1);
	vbx(SVW,VMOV,side,0,0);
	vbx_dma_to_host_2D(output,/*host_ptr*/
	                   side,/*sp_ptr*/
	                   sizeof(vbx_word_t),/*row len*/
	                   image_height,/*num rows*/
	                   image_pitch*sizeof(vbx_word_t),/*host_incr*/
	                   0);/*sp incr*/
	vbx_dma_to_host_2D(output+image_width-1,/*host_ptr*/
	                   side,/*sp_ptr*/
	                   sizeof(vbx_word_t),/*row len*/
	                   image_height,/*num rows*/
	                   image_pitch*sizeof(vbx_word_t),/*host_incr*/
	                   0);/*sp incr*/
	vbx_sp_pop();
	vbx_sync();

	return VBW_SUCCESS;
}
Example #20
File: test.c Project: 8l/mxp
int compare_vbx_lbp_ci_to_scalar_patterns(unsigned short* img, int log, int width, int height, int max_print_errors)
{
    int j, l, cell, max_cell, errors = 0;
    unsigned char** scalar_patterns = test_scalar_patterns(img, log, width, height);

    max_cell = 1<<log;
    vbx_uhalf_t* v_in = (vbx_uhalf_t*)vbx_sp_malloc((1+2*max_cell)*width*sizeof(vbx_half_t));
    vbx_uhalf_t* v_top = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_half_t));
    vbx_uhalf_t* v_bot = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_half_t));
    vbx_ubyte_t* v_lbp = (vbx_ubyte_t*)v_bot;

    unsigned char* lbp = (unsigned char*)vbx_shared_malloc(width*sizeof(unsigned char));

    vbx_set_vl(width);
    for(l = 0; l < 1; l++){
        cell = 1<<l;
        for(j=0; j < height - 2*cell; j++){
            vbx_dma_to_vector(v_in, img+j*width, (1+2*cell)*width*sizeof(unsigned short));
            vbx(VVHU, VCUSTOM1, v_top, v_in, v_in+(1*cell)*width); 
            vbx(VVHU, VCUSTOM1, v_bot, v_in+(1*cell)*width, v_in+(2*cell)*width); 
            vbx(SVHBU, VAND, (vbx_ubyte_t*)v_top, 0xf0, v_top);
            vbx(SVHBU, VAND, (vbx_ubyte_t*)v_bot, 0x0f, v_bot);
            vbx(VVBU, VADD, v_lbp, v_bot, v_top); 
            vbx_dma_to_host(lbp, v_lbp, width*sizeof(unsigned char));
            vbx_sync();

            errors += match_array_byte(lbp, scalar_patterns[l]+j*width, "custom_lbp", width-2*cell, 1, 0, max_print_errors, 1, j);
            if (errors > max_print_errors){
                max_print_errors = 0;
            }

        }
    }
    vbx_sp_free();
    vbx_shared_free(lbp);
    return errors;
}
Example #21
File: test.c Project: 8l/mxp
//FIXME stride for match not implemented
int compare_LBPPassStage_to_restricted(unsigned short *vbx_img, int log, lbp_stage_t lbp_stage, int window, int width, int height, int max_print_errors)
{
    int l, i, j, cell, errors = 0;

    unsigned char** scalar_patterns = test_scalar_patterns(vbx_img, log, width, height);

    unsigned char *pass, *vbx_pass;
    pass = (unsigned char*)vbx_shared_malloc(width*height*sizeof(unsigned char));
    vbx_pass = (unsigned char*)vbx_shared_malloc(width*height*sizeof(unsigned char));
    
    vbx_byte_t** v_lbp =(vbx_byte_t**)vbx_shared_malloc((log+1)*sizeof(vbx_byte_t*));
    for (l=0; l<log+1; l++) {
        v_lbp[l] = (vbx_byte_t*)vbx_sp_malloc((window+1)*width*sizeof(vbx_byte_t)); 
    }
    vbx_byte_t* v_lut = (vbx_byte_t*)vbx_sp_malloc(width*sizeof(vbx_byte_t)); 
    vbx_byte_t* v_stage = (vbx_byte_t*)vbx_sp_malloc(width*sizeof(vbx_byte_t)); 
    vbx_byte_t* v_pattern;
    lbp_feat_t feat;
    int dx, dy, dw, f;

    for (l=0; l<log+1; l++) {
        vbx_dma_to_vector(v_lbp[l]+width, scalar_patterns[l], (window)*width*sizeof(unsigned char));
    }
    vbx_sync();
    for(j=0; j < height-(window+1); j++) {
        for (l=0; l<log+1; l++) {
            vbx_set_vl(width * window);
            vbx(VVB, VMOV, v_lbp[l], v_lbp[l]+width, NULL);
            vbx_dma_to_vector(v_lbp[l] + window*width, scalar_patterns[l]+(j+window)*width, width*sizeof(unsigned char));
        }

        vbx_set_vl(width-(window+1));
        vbx(SVB, VMOV, v_stage, 0, NULL);
        for (f = 0; f < lbp_stage.count; f++) {
            feat = lbp_stage.feats[f];
            dx = feat.pos.src.x;
            dy = feat.pos.src.y;
            dw = feat.pos.size.x;
            v_pattern = v_lbp[dw>>1]+(dy*width+dx);

            vbx(SVBU, VLBPLUT, v_lut, f, v_pattern);
            vbx(VVB, VADD, v_stage, v_stage, v_lut);
        }
        vbx(SVB, VMOV, v_lut, 0, NULL);
        vbx(SVB, VCMV_GEZ, v_lut, 1, v_stage);
        vbx_dma_to_host(vbx_pass + j*width, v_lut, (width-(window+1))*sizeof(unsigned char));
        vbx_sync();
    }


    unsigned int *iImg, *iiImg;
    iImg = (unsigned int *)vbx_shared_malloc(width*height*sizeof(unsigned int));
    iiImg = (unsigned int *)vbx_shared_malloc(width*height*sizeof(unsigned int));

    gen_integrals(vbx_img, iImg, iiImg, width, height);

    image_t lbp_img = {iImg, {width, height}};
    for (j = 0; j < height - (window + 1); j++) {
        for (i = 0; i < width - (window + 1); i++) {
            pair_t lbp_p = {i, j};
            pass[j*width+i] = LBPPassStage(lbp_img, lbp_stage, lbp_p);
        }
    }

    /* test pass vs vbx pass */
    for (j = 0; j < height - (window + 1); j++) {
        errors += match_array_byte(vbx_pass + j*width, pass + j*width, "pass stage", width - (window + 1), 1, 0, max_print_errors, 1, j);
        if (errors > max_print_errors){
            max_print_errors = 0;
        }
    }
    return errors;
}
Example #22
File: test.c Project: 8l/mxp
int VBX_T(vbw_vec_reverse_test)()
{
	unsigned int aN[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 20, 25, 31, 32, 33, 35, 40, 48, 60, 61, 62, 63, 64, 64, 65,
	                      66, 67, 68, 70, 80, 90, 99, 100, 101, 110, 128, 128, 144, 144, 160, 160, 176, 176, 192, 192, 224, 224,
	                      256, 256, 288, 288, 320, 320, 352, 352, 384, 384, 400, 450, 512, 550, 600, 650, 700, 768, 768, 900,
	                      900, 1023, 1024, 1200, 1400, 1600, 1800, 2048, 2048, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800,
	                      2900, 3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700, 3800, 3900, 4000, 4096, 4096, 4100, 4200, 4300,
	                      4400, 4500, 4600, 4700, 4800, 4900, 5000, 6000, 7000, 8000, 8192, 8192, 9000, 10000, 11000, 12000,
	                      13000, 14000, 15000, 16000, 16384, 16384, 20000, 25000, 30000, 32767, 32768, 32768, 35000, 40000,
	                      45000, 50000, 55000, 60000, 65000, 65535, 65536, 65536 };

	int retval;
	unsigned int N;
	unsigned int NBYTES;
	unsigned int NREPS = 100;
	unsigned int i,k;

	vbx_timestamp_t start=0,finish=0;

	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	const unsigned int VBX_SCRATCHPAD_SIZE = this_mxp->scratchpad_size;

	for( i=0; i<sizeof(aN)/4; i++ ) {
		N = aN[i];
		//printf( "testing with vector size %d\n", N );

		NBYTES = sizeof(vbx_sp_t)*N;
		if( 2*NBYTES > VBX_SCRATCHPAD_SIZE ) continue;

		vbx_sp_t *vsrc = vbx_sp_malloc( NBYTES );
		vbx_sp_t *vdst = vbx_sp_malloc( NBYTES );
		//printf("bytes alloc: %d\n", NBYTES );

		if( !vsrc ) VBX_EXIT(-1);
		if( !vdst ) VBX_EXIT(-1);

		#if   ( VBX_TEMPLATE_T == BYTESIZE_DEF | VBX_TEMPLATE_T == UBYTESIZE_DEF )
			unsigned int mask = 0x007F;
		#elif ( VBX_TEMPLATE_T == HALFSIZE_DEF | VBX_TEMPLATE_T == UHALFSIZE_DEF )
			unsigned int mask = 0x7FFF;
		#else
			unsigned int mask = 0xFFFF;
		#endif

		vbx_set_vl( N );
		vbx( SV(T), VMOV, vdst,   -1, 0 );       // Fill the destination vector with -1
		vbx( SE(T), VAND, vsrc, mask, 0 );       // Fill the source vector with enumerated values
		//VBX_T(print_vector)( "vsrcInit", vsrc, N );
		//VBX_T(print_vector)( "vdstInit", vdst, N );

		/** measure performance of function call **/
		vbx_sync();
		start = vbx_timestamp();
		for(k=0; k<NREPS; k++ ) {
			retval = VBX_T(vbw_vec_reverse)( vdst, vsrc, N );
			vbx_sync();
		}
		finish = vbx_timestamp();
		printf( "length %d (%s):\tvbware sp f():\t%llu", N, VBX_EXPAND_AND_QUOTE(BYTEHALFWORD), (unsigned long long) vbx_mxp_cycles((finish-start)/NREPS) );
		//VBX_T(print_vector)( "vsrcPost", vsrc, N );
		//VBX_T(print_vector)( "vdstPost", vdst, N );

		#if VERIFY_VBWARE_ALGORITHM
			VBX_T(verify_vector)( vsrc, vdst, N );
		#else
			printf(" [VERIFY OFF]");
		#endif

		printf("\treturn value: %X", retval);

		vbx_set_vl( N );
		vbx( SE(T), VAND, vsrc, mask, 0 );       // Reset the source vector

		/** measure performance of simple algorithm **/
		vbx_sync();
		vbx_set_vl( 1 );
		vbx_set_2D( N, -sizeof(vbx_sp_t), sizeof(vbx_sp_t), 0 );
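		// (one element per 2D row: the destination starts at vdst+N-1 and steps
		//  backward by sizeof(vbx_sp_t) while the source steps forward, so the
		//  vl=1 2D move below reverses the vector)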

		start = vbx_timestamp();
		for(k=0; k<NREPS; k++ ) {
			vbx_2D( VV(T), VMOV, vdst+N-1, vsrc, 0 );
			vbx_sync();
		}
		finish = vbx_timestamp();

		printf( "\tsimple (vl=1):\t%llu", (unsigned long long) vbx_mxp_cycles((finish-start)/NREPS) );

		#if VERIFY_SIMPLE_ALGORITHM
			VBX_T(verify_vector)( vsrc, vdst, N );
		#else
			printf(" [VERIFY OFF]");
		#endif
			printf("\tcycles\n");

		vbx_sp_free();
	}

	vbx_sp_free();
	printf("All tests passed successfully.\n");

	return 0;
}
Example #23
int vbw_bifilt_argb32_3x3(unsigned *output, unsigned *input, short image_width, const short image_height, const short image_pitch, const short renorm)
{

//return vbw_sobel_argb32_3x3( output, input, image_width, image_height, image_pitch, renorm);

	int y;
	int xx, yy, sharp;

	vbx_uword_t *v_row_in;
	vbx_ubyte_t *v_luma_top, *v_luma_mid, *v_luma_bot;
	vbx_ubyte_t *v_luma_hii,              *v_luma_low;
	vbx_ubyte_t *v_src[W][W];

	vbx_uword_t *v_row_out;

	vbx_ubyte_t *v00, *v01, *v02, *v10, *v11, *v12, *v20, *v21, *v22;
#if W==5
	vbx_ubyte_t *v03, *v04,       *v13, *v14,       *v23, *v24;
	vbx_ubyte_t *v30, *v31, *v32, *v40, *v41, *v42;
	vbx_ubyte_t *v33, *v34,       *v43, *v44;
#endif
	vbx_ubyte_t *v[W][W];

	vbx_uhalf_t *vI, *vW, *vT;  // vT== temporary


	vbx_sp_push();

	// Allocate space in scratchpad for vectors
	struct rotating_prefetcher_t v_row_db=rotating_prefetcher(1,image_width*sizeof(vbx_uword_t),
	                                                          input,input+image_height*image_pitch,
	                                                          image_pitch*sizeof(vbx_uword_t));

	v_row_out  = (vbx_uword_t*)vbx_sp_malloc(image_width*sizeof(vbx_uword_t));
	vT         = (vbx_uhalf_t*)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
#if 1
	// save some space by overlapping with v_row_out
	vW         = (vbx_uhalf_t*)v_row_out;
	vI         = (vbx_uhalf_t*)v_row_out + image_width;
#else
	vW         = (vbx_uhalf_t*)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	vI         = (vbx_uhalf_t*)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
#endif

#if W==3
	v_luma_top      = (vbx_ubyte_t*)vbx_sp_malloc( 3 * image_width*sizeof(vbx_ubyte_t));
	v_luma_mid      = v_luma_top + 1 * image_width*sizeof(vbx_ubyte_t) ;
	v_luma_bot      = v_luma_top + 2 * image_width*sizeof(vbx_ubyte_t) ;
#else
	v_luma_top      = (vbx_ubyte_t*)vbx_sp_malloc( 5 * image_width*sizeof(vbx_ubyte_t));
	v_luma_hii      = v_luma_top + 1 * image_width*sizeof(vbx_ubyte_t) ;
	v_luma_mid      = v_luma_top + 2 * image_width*sizeof(vbx_ubyte_t) ;
	v_luma_low      = v_luma_top + 3 * image_width*sizeof(vbx_ubyte_t) ;
	v_luma_bot      = v_luma_top + 4 * image_width*sizeof(vbx_ubyte_t) ;
#endif


	if(v_luma_bot==NULL){
		vbx_sp_pop();
		return VBW_ERROR_SP_ALLOC_FAILED;
	}

	// Transfer the first 3 input rows and interleave first 2 rgb2luma and first 2 sobel row calculations
#if W==3
	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma(vW, v_row_in, vT, image_width);                                // 1st luma row
	vbx( SVHBU, VSHR, v_luma_top, 8, vW );                                     // convert to byte

	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma( vW, v_row_in, vT, image_width);                               // 2nd luma row
	vbx( SVHBU, VSHR, v_luma_mid, 8,  vW );                                    // convert to byte

#else
	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma(vW, v_row_in, vT, image_width);                                // 1st luma row
	vbx( SVHBU, VSHR, v_luma_top, 8, vW );                                     // convert to byte

	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma( vW, v_row_in, vT, image_width);                               // 2nd luma row
	vbx( SVHBU, VSHR, v_luma_hii, 8,  vW );                                    // convert to byte

	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma( vW, v_row_in, vT, image_width);                               // 3rd luma row
	vbx( SVHBU, VSHR, v_luma_mid, 8,  vW );                                    // convert to byte

	rp_fetch(&v_row_db);
	v_row_in = rp_get_buffer(&v_row_db,0);
	vbw_rgb2luma( vW, v_row_in, vT, image_width);                               // 4th luma row
	vbx( SVHBU, VSHR, v_luma_low, 8,  vW );                                    // convert to byte
#endif


	// blank out the top and bottom rows
	unsigned *out;
	vbx_set_vl(image_width);
	unsigned COLOUR = ( 200 | (128<<8) | (244<<16) );
	vbx(SVWU, VMOV, v_row_out, COLOUR, 0);
	for( y=0; y<W/2; y++ ) {
		// Set top output rows to 0
		out = output + image_width*y;
		vbx_dma_to_host( out, v_row_out, image_width*sizeof(vbx_uword_t) );
		// Set bottom rows to 0
		out = output + image_width*(image_height-1-y);
		vbx_dma_to_host( out, v_row_out, image_width*sizeof(vbx_uword_t) );
	}



	// Calculate edges
	for (y = 0; y < image_height-(W-1); y++) {

		vbx_set_vl(image_width);
		// Transfer the next input row while processing
		rp_fetch(&v_row_db);
		v_row_in = rp_get_buffer(&v_row_db,0);
		// Convert aRGB input to luma
		vbw_rgb2luma( vW, v_row_in, vT, image_width);
		vbx( SVHBU, VSHR, v_luma_bot, 8,  vW );                                     // convert to byte

vbx_sp_push();
		image_width=image_width/2;
		vbx_set_vl(image_width);

		v[0][0] = v00   = (vbx_ubyte_t*)vbx_sp_malloc( 25 * image_width*sizeof(vbx_ubyte_t));
		v[0][1] = v01   = v00 +  1 * image_width*sizeof(vbx_ubyte_t) ;
		v[0][2] = v02   = v00 +  2 * image_width*sizeof(vbx_ubyte_t) ;
		v[1][0] = v10   = v00 +  3 * image_width*sizeof(vbx_ubyte_t) ;
		v[1][1] = v11   = v00 +  4 * image_width*sizeof(vbx_ubyte_t) ;
		v[1][2] = v12   = v00 +  5 * image_width*sizeof(vbx_ubyte_t) ;
		v[2][0] = v20   = v00 +  6 * image_width*sizeof(vbx_ubyte_t) ;
		v[2][1] = v21   = v00 +  7 * image_width*sizeof(vbx_ubyte_t) ;
		v[2][2] = v22   = v00 +  8 * image_width*sizeof(vbx_ubyte_t) ;

	#if W==5
		v[0][3] = v03   = v00 +  9 * image_width*sizeof(vbx_ubyte_t) ;
		v[0][4] = v04   = v00 + 10 * image_width*sizeof(vbx_ubyte_t) ;
		v[1][3] = v13   = v00 + 11 * image_width*sizeof(vbx_ubyte_t) ;
		v[1][4] = v14   = v00 + 12 * image_width*sizeof(vbx_ubyte_t) ;
		v[2][3] = v23   = v00 + 13 * image_width*sizeof(vbx_ubyte_t) ;
		v[2][4] = v24   = v00 + 14 * image_width*sizeof(vbx_ubyte_t) ;

		v[3][0] = v30   = v00 + 15 * image_width*sizeof(vbx_ubyte_t) ;
		v[3][1] = v31   = v00 + 16 * image_width*sizeof(vbx_ubyte_t) ;
		v[3][2] = v32   = v00 + 17 * image_width*sizeof(vbx_ubyte_t) ;
		v[3][3] = v33   = v00 + 18 * image_width*sizeof(vbx_ubyte_t) ;
		v[3][4] = v34   = v00 + 19 * image_width*sizeof(vbx_ubyte_t) ;

		v[4][0] = v40   = v00 + 20 * image_width*sizeof(vbx_ubyte_t) ;
		v[4][1] = v41   = v00 + 21 * image_width*sizeof(vbx_ubyte_t) ;
		v[4][2] = v42   = v00 + 22 * image_width*sizeof(vbx_ubyte_t) ;
		v[4][3] = v43   = v00 + 23 * image_width*sizeof(vbx_ubyte_t) ;
		v[4][4] = v44   = v00 + 24 * image_width*sizeof(vbx_ubyte_t) ;
	#endif

		if(v00==NULL){
printf("mem alloc failed\n"); fflush(stdout);
			vbx_sp_pop();
			vbx_sp_pop();
			return VBW_ERROR_SP_ALLOC_FAILED;
		}


//FIXME -- how to manage row buffers with 5 rows?  3 rows are shown below:
#if W==3
		for( xx=0; xx<W; xx++ ) v_src[0][xx] = v_luma_top+xx;
		for( xx=0; xx<W; xx++ ) v_src[1][xx] = v_luma_mid+xx;
		for( xx=0; xx<W; xx++ ) v_src[2][xx] = v_luma_bot+xx;
#else
		for( xx=0; xx<W; xx++ ) v_src[0][xx] = v_luma_top+xx;
		for( xx=0; xx<W; xx++ ) v_src[1][xx] = v_luma_hii+xx;
		for( xx=0; xx<W; xx++ ) v_src[2][xx] = v_luma_mid+xx;
		for( xx=0; xx<W; xx++ ) v_src[3][xx] = v_luma_low+xx;
		for( xx=0; xx<W; xx++ ) v_src[4][xx] = v_luma_bot+xx;
#endif

		vbx_set_vl( image_width - W + 1 );

		// compute error (absdiff) in pixel colour with neighbours
		for( yy=0; yy<W; yy++ ) {
			for( xx=0; xx<W; xx++ ) {
				vbx( VVBU, VABSDIFF, v[yy][xx], v_luma_mid+(W/2), v_src[yy][xx] );
			}
		}


		// v[][] holds the errors (differences) between pixels
		// efficiently compute a function that looks approximately something like exp(-x):
		//     large value for small errors, small value for big errors
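		// (each VMULHI below computes (v*v)>>8, roughly 255*(v/255)^2, so k
		//  iterations give about 255*(v/255)^(2^k): near 255 for small errors,
		//  dropping off sharply for large ones -- an exp(-x)-like falloff)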
		for( yy=0; yy<W; yy++ ) {
			for( xx=0; xx<W; xx++ ) {
				vbx( SVBU, VABSDIFF, v[yy][xx], 255, v[yy][xx] );  // 255 - img_err
				// 11 or more iterations is mathematically equivalent to a pure gaussian blur // FIXME is this true?
#define NUM_SHARPEN_ITERATIONS  3   // 0 to 10 iterations, practical max is 7 or 8
				for( sharp=0; sharp < NUM_SHARPEN_ITERATIONS; sharp++ ) {
					vbx( VVBU, VMULHI, v[yy][xx], v[yy][xx], v[yy][xx] ); // v*v;
				}
			}
		}

		// with right decimal place, could do the next two instructions using MULFXP and do as BYTES
		// convolve errors with gaussian blur kernel
		for( yy=0; yy<W; yy++ ) {
			for( xx=0; xx<W; xx++ ) {
				vbx( SVBU, VMULHI, v[yy][xx], gauss[yy][xx], v[yy][xx] );
			}
		}

		// sum up the weights for normalization later
		vbx( VVBHU, VADD, vW, v[0][0], v[0][1] );
		vbx( VVBHU, VADD, vT, v[0][2], v[1][0] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VADD, vT, v[1][1], v[1][2] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VADD, vT, v[2][0], v[2][1] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[2][2], 0 );
		vbx( VVHU,  VADD, vW, vW, vT );
	#if (W==5)
		vbx( VVBHU, VADD, vT, v[3][0], v[3][1] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VADD, vT, v[3][2], v[4][0] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VADD, vT, v[4][1], v[4][2] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[0][3], v[0][4] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[1][3], v[1][4] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[2][3], v[2][4] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[3][3], v[3][4] );
		vbx( VVHU,  VADD, vW, vW, vT );
		vbx( VVBHU, VMOV, vT, v[4][3], v[4][4] );
		vbx( VVHU,  VADD, vW, vW, vT );
	#endif


		// convolve image with new weights
		for( yy=0; yy<W; yy++ ) {
			for( xx=0; xx<W; xx++ ) {
				vbx( VVBU, VMULHI, v[yy][xx], v_src[yy][xx], v[yy][xx] );
				//vbx( SVBU, VMULHI, v[yy][xx], gauss[yy][xx], v_src[yy][xx] );
				//vbx( SVBU, VMUL  , v[yy][xx],         1      , v_src[yy][xx] );
			}
		}



		// sum up the weighted pixels
		vbx( VVBHU, VADD, vI, v[0][0], v[0][1] );
		vbx( VVBHU, VADD, vT, v[0][2], v[1][0] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VADD, vT, v[1][1], v[1][2] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VADD, vT, v[2][0], v[2][1] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[2][2], 0 );
		vbx( VVHU,  VADD, vI, vI, vT );

	#if (W==5)
		vbx( VVBHU, VADD, vT, v[3][0], v[3][1] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VADD, vT, v[3][2], v[4][0] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VADD, vT, v[4][1], v[4][2] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[0][3], v[0][4] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[1][3], v[1][4] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[2][3], v[2][4] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[3][3], v[3][4] );
		vbx( VVHU,  VADD, vI, vI, vT );
		vbx( VVBHU, VMOV, vT, v[4][3], v[4][4] );
		vbx( VVHU,  VADD, vI, vI, vT );
	#endif


// keep RHS of image as original grayscale
image_width=image_width*2;
vbx_set_vl( image_width/2 );
//vbx( VVWHU, VMOV, vT+image_width/2, (v_row_in       ) + image_width/2+1, 0 );
vbx( VVBHU, VMOV, vT+image_width/2, (v_src[ 0 ][ 0 ]) + image_width/2+1, 0 );
vbx_sp_pop(); // don't need v[][] data any more

// compute LHS of image
#if 0
		vbx( VVBHU, VMOV, vT, v_src[2][2], 0 );
		//vbx( SVHU, VSHR, vI,  3, vI );
		//vbx( SVHU, VSHR, vW,  3, vW );
		//vbx( VVHU, VMUL, vT, vI, vW );
		//vbx( SVHU, VSHR, vT,  8, vT );
#else
		uint32_t h = image_width/2;
		vbx( SVHU, VADD, vW, 0x80, vW ); // round
		vbx( SVHU, VSHR, vW,    8, vW );
		vbw_vec_divide_uhalf( vT  , vI  , vW  , h                 );
		//vbw_vec_divide_uhalf( vT+h, vI+h, vW+h, image_width-W+1-h );
#endif
		// ensure LHS doesn't overflow
		vbx( SVHU, VAND, vT, 0xff, vT );

		// Copy the result to the low byte of the output row
		// Trick to copy the low byte (b) to the middle two bytes as well
		// Note that first and last columns are 0
		vbx_set_vl(image_width-W+1);
		vbx(SVHWU, VMULLO, v_row_out+W/2, 0x00010101, vT);

		// blank out left and right edges
		// then DMA the result to the output
		vbx_set_vl(W/2);
		vbx(SVWU, VMOV, v_row_out, COLOUR, 0 );
		vbx(SVWU, VMOV, v_row_out + image_width - (W/2), COLOUR, 0 );
		vbx_dma_to_host( output+(y+1)*image_pitch, v_row_out, image_width*sizeof(vbx_uword_t) );

		// Rotate luma buffers
		vbx_ubyte_t *tmp_ptr;
		tmp_ptr      = v_luma_top;
#if W==3
		v_luma_top   = v_luma_mid;
		v_luma_mid   = v_luma_bot;
		v_luma_bot   = tmp_ptr;
#else
		v_luma_top   = v_luma_hii;
		v_luma_hii   = v_luma_mid;
		v_luma_mid   = v_luma_low;
		v_luma_low   = v_luma_bot;
		v_luma_bot   = tmp_ptr;
#endif

	}

	vbx_sync();
	vbx_sp_pop();

	return VBW_SUCCESS;
}
Example #24
/** Luma Edge Detection.
 *
 * @brief 3x3 Sobel edge detection with 8-bit luma image
 *
 * @param[out] output      32-bit aRGB edge-intensity output
 * @param[in] input        8-bit luma input
 * @param[in] image_width  Image width in pixels
 * @param[in] image_height Image height in pixels
 * @param[in] image_pitch  Distance in pixels between the start of subsequent rows. usually equal to image_width
 * @param[in] renorm       Number of bits to shift the final intensity by to the right
 * @returns Negative on error condition. See vbw_exit_codes.h
 */
int vbw_sobel_luma8_3x3(unsigned *output, unsigned char *input, const short image_width, const short image_height, const short image_pitch, const short renorm)
{
	int y;

	vbx_ubyte_t *v_luma_top, *v_luma_mid, *v_luma_bot;
	vbx_uword_t *v_row_out;

	vbx_uhalf_t *v_sobel_row_top, *v_sobel_row_mid, *v_sobel_row_bot;
	vbx_uhalf_t *v_gradient_x, *v_gradient_y;
	vbx_uhalf_t *v_tmp;

	void *tmp_ptr;

	vbx_sp_push();

	// Allocate space in scratchpad for vectors
	rotating_prefetcher_t v_luma=rotating_prefetcher(3,image_width*sizeof(vbx_ubyte_t),
	                                                 input,input+image_height*image_pitch,
	                                                 image_pitch*sizeof(vbx_ubyte_t));
	v_sobel_row_top = (vbx_uhalf_t *)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	v_sobel_row_mid = (vbx_uhalf_t *)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	v_sobel_row_bot = (vbx_uhalf_t *)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	v_gradient_x    = (vbx_uhalf_t *)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	v_gradient_y    = (vbx_uhalf_t *)vbx_sp_malloc(image_width*sizeof(vbx_uhalf_t));
	v_row_out       = (vbx_uword_t *)vbx_sp_malloc(image_width*sizeof(vbx_uword_t));
	if(v_row_out==NULL) {
		vbx_sp_pop();
		return VBW_ERROR_SP_ALLOC_FAILED;
	}

	// Transfer the first 3 input rows and interleave first 2 sobel row calculations
	rp_fetch(&v_luma);
	rp_fetch(&v_luma);
	v_luma_top=rp_get_buffer(&v_luma, 0);
	vbw_sobel_3x3_row(v_sobel_row_top, v_luma_top,image_width);
	rp_fetch(&v_luma);
	v_luma_mid=rp_get_buffer(&v_luma, 1);
	vbw_sobel_3x3_row(v_sobel_row_mid, v_luma_mid, image_width);

	// Set top output row to 0
	vbx_set_vl(image_width);
	vbx(SVWU, VMOV, v_row_out, 0, 0);
	vbx_dma_to_host(output, v_row_out, image_width*sizeof(vbx_uword_t));

	// Calculate edges
	for (y = 0; y < image_height-(FILTER_HEIGHT-1); y++) {
		// Transfer the next input row while processing
		rp_fetch(&v_luma);
		v_luma_top=rp_get_buffer(&v_luma,0);
		v_luma_mid=rp_get_buffer(&v_luma,1);
		v_luma_bot=rp_get_buffer(&v_luma,2);
		// Start calculating gradient_x
		vbx_set_vl(image_width);
		vbx(SVBHU, VSHL, v_gradient_x, 1, v_luma_mid); // multiply by 2

		// Calculate gradient_y
		// Apply [1 2 1] matrix to last row in window and calculate absolute difference with pre-computed first row
		vbw_sobel_3x3_row(v_sobel_row_bot, v_luma_bot, image_width);
		vbx(VVH, VABSDIFF, (vbx_half_t*)v_gradient_y, (vbx_half_t*)v_sobel_row_top, (vbx_half_t*)v_sobel_row_bot);

		// Re-use v_sobel_row_top
		v_tmp = v_sobel_row_top;

		// Finish calculating gradient_x
		// Apply [1 2 1]T matrix to all columns
		vbx_set_vl(image_width);
		vbx(VVBHU, VADD, v_tmp, v_luma_top, v_luma_bot);
		vbx(VVHU,  VADD, v_tmp, v_tmp,      v_gradient_x);
		// For each column, calculate absolute difference with 2nd column to the right
		vbx_set_vl(image_width-2);
		vbx(VVH, VABSDIFF, (vbx_half_t*)v_gradient_x, (vbx_half_t*)v_tmp, (vbx_half_t*)v_tmp+2);

		// sum of absoute gradients
		//vbx_set_vl(image_width-2);
		vbx(VVHU, VADD, v_tmp, v_gradient_x,  v_gradient_y);
		vbx(SVHU, VSHR, v_tmp, renorm, v_tmp);

		// Threshold
		vbx(SVHU, VSUB,     v_gradient_y, 255, v_tmp);
		vbx(SVHU, VCMV_LTZ, v_tmp,        255, v_gradient_y);

		// Copy the result to the low byte of the output row
		// Trick to copy the low byte (b) to the middle two bytes as well
		// Note that first and last columns are 0
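		// (multiplying the 8-bit intensity by 0x00010101 replicates it into the
		//  low three bytes of each 32-bit output word, i.e. a grey aRGB pixel)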
		//vbx_set_vl(image_width-2);
		vbx(SVHWU, VMULLO, v_row_out+1, 0x00010101, v_tmp);

		// DMA the result to the output
		vbx_dma_to_host(output+(y+1)*image_pitch, v_row_out, image_width*sizeof(vbx_uword_t));


		// Rotate v_sobel_row buffers (for gradient_y)
		tmp_ptr         = (void *)v_sobel_row_top;
		v_sobel_row_top = v_sobel_row_mid;
		v_sobel_row_mid = v_sobel_row_bot;
		v_sobel_row_bot = (vbx_uhalf_t *)tmp_ptr;
	}

	// Set bottom row to 0
	vbx_set_vl(image_width);
	vbx(SVWU, VMOV, v_row_out, 0, 0);
	vbx_dma_to_host(output+(image_height-1)*image_pitch, v_row_out, image_width*sizeof(vbx_uword_t));

	vbx_sync();
	vbx_sp_pop();

	return VBW_SUCCESS;
}
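
A minimal calling sketch for vbw_sobel_luma8_3x3 (the frame size, buffer names, and renorm value below are hypothetical; it only assumes the vbx_shared_malloc/vbx_shared_free calls and the VBW_* return codes already used in these examples):

static void sobel_luma8_demo(void)
{
	const short W = 640, H = 480;
	unsigned char *luma  = (unsigned char *)vbx_shared_malloc(W*H*sizeof(unsigned char));
	unsigned *edges      = (unsigned *)vbx_shared_malloc(W*H*sizeof(unsigned));
	/* ... fill luma with an 8-bit greyscale frame ... */
	int rc = vbw_sobel_luma8_3x3(edges, luma, W, H, W /*pitch == width*/, 3 /*renorm*/);
	if (rc != VBW_SUCCESS) {
		/* e.g. VBW_ERROR_SP_ALLOC_FAILED if the scratchpad is too small */
	}
	vbx_shared_free(edges);
	vbx_shared_free(luma);
}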
Example #25
//vector version of rgb converter
void vector_blend(
    output_pointer img_out, input_pointer img_in1, input_pointer img_in2,
    unsigned int num_row, unsigned int num_column, intermediate_type blending_const )
{
    intermediate_type *v_img1[2];
    input_type        *v_img2[2];
    intermediate_type *v_temp;

    intermediate_type blending_const_bar = 256-blending_const;
    int j;

    vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
    const int VBX_SCRATCHPAD_SIZE = this_mxp->scratchpad_size;
    const int VBX_WIDTH_BYTES     = this_mxp->vector_lanes * sizeof(int);
    const int VBX_DMA_ALIGNMENT   = this_mxp->dma_alignment_bytes;

    unsigned int chunk_size = VBX_SCRATCHPAD_SIZE/((3*sizeof(intermediate_type))+(2*sizeof(input_type)));
    chunk_size = VBX_PAD_UP( chunk_size-(VBX_WIDTH_BYTES-1), VBX_DMA_ALIGNMENT );
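    // e.g. (hypothetical sizes): a 64 KiB scratchpad with a 2-byte intermediate_type
    // and a 1-byte input_type gives 65536/8 = 8192 elements, which the trim by
    // (VBX_WIDTH_BYTES-1) and VBX_PAD_UP then round to the DMA alignment.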

    unsigned int chunk_size_old    = chunk_size;
    unsigned int vector_length     = chunk_size;
    unsigned int vector_length_old = vector_length;

    v_img1[0] = (intermediate_type *)vbx_sp_malloc( chunk_size*sizeof(intermediate_type) );
    v_img1[1] = (intermediate_type *)vbx_sp_malloc( chunk_size*sizeof(intermediate_type) );
    v_img2[0] = (input_type        *)vbx_sp_malloc( chunk_size*sizeof(input_type) );
    v_img2[1] = (input_type        *)vbx_sp_malloc( chunk_size*sizeof(input_type) );
    v_temp    = (intermediate_type *)vbx_sp_malloc( chunk_size*sizeof(intermediate_type) );

    if( v_temp == NULL ) {
        VBX_EXIT(0xBADDEAD);
    }

    int bufselect = 0;

    vbx_dma_to_vector( v_img1[bufselect], img_in1, chunk_size*sizeof(input_type) );
    vbx_dma_to_vector( v_img2[bufselect], img_in2, chunk_size*sizeof(input_type) );

    for( j=0; j<num_row*num_column; j+=vector_length_old ) {
        vbx_set_vl(vector_length);

        if( j > 0 ) {
            vbx_dma_to_host( img_out+j-vector_length_old, v_img1[1-bufselect], chunk_size_old*sizeof(output_type) );
        }

        if( (j+vector_length_old) < (num_row*num_column-1) ) {
            if( (j+vector_length_old*2) >= num_row*num_column ) {
                vector_length =  num_row*num_column - j - vector_length_old;
                chunk_size = vector_length;
            }
            vbx_dma_to_vector( v_img1[1-bufselect], img_in1+j+vector_length_old, chunk_size*sizeof(input_type) );
            vbx_dma_to_vector( v_img2[1-bufselect], img_in2+j+vector_length_old, chunk_size*sizeof(input_type) );
        }

        vbx( SVBHU, VMULLO, v_temp,            blending_const,     v_img1[bufselect] );
        vbx( SVBHU, VMULLO, v_img1[bufselect], blending_const_bar, v_img2[bufselect] );
        vbx( VVHU,  VADD,   v_img1[bufselect], v_img1[bufselect],  v_temp );
        vbx( SVHBU, VSHR,   v_img1[bufselect], 8,                  v_img1[bufselect] );

        bufselect = 1-bufselect;
    }

    vbx_dma_to_host( img_out+j-vector_length_old, v_img1[1-bufselect], chunk_size*sizeof(output_type) );
    vbx_sp_free();
    vbx_sync();
}
Example #26
int main_tile()
{
	int i, j, k, l, base, block_num;
	int x, y;

	int time_start, time_stop;
	unsigned int cycles;
	double vbx_time, scalar_time;
	int wrong;

	int total_errors = 0;

	printf("\nGenerating initial data...\n");

	dt *image  = (dt *) malloc( IMAGE_WIDTH * IMAGE_HEIGHT * sizeof(dt) );
	GenerateRandomImage( image, IMAGE_WIDTH, IMAGE_HEIGHT, 0/*seed*/ );

	// Allocate memory to store results.
	// Results are computed BIGTILE_SIZE halfwords at a time.
	const int BIGTILE_SIZE = NUM_TILE_X * NUM_TILE_Y * DCT_SIZE;
	dt *block_s =                   malloc( BIGTILE_SIZE * sizeof(dt) );
	dt *block_v = (dt *) vbx_shared_malloc( BIGTILE_SIZE * sizeof(dt) );
	dt *coeff_v = (dt *) vbx_shared_malloc( BIGTILE_SIZE * sizeof(dt) );

	//Make an uncached 1D version of the coeff matrix
	for (i = 0; i < NUM_TILE_Y; i++) {             // row
		for (j = 0; j < BLOCK_SIZE; j++) {         // row
			for (k = 0; k < NUM_TILE_X; k++) {     // col
				for (l = 0; l < BLOCK_SIZE; l++) { // col
					coeff_v[i*NUM_TILE_X*DCT_SIZE + j*DCT_SIZE + k*BLOCK_SIZE + l] = cs[j][l];
				}
			}
		}
	}

	//all of the initialization can be hard coded without any computation
	//(must come after image and coeff_v have been allocated and filled above)
	vbx_mtx_fdct_t *v = vbx_mtx_fdct_init( coeff_v, image );
	vbx_timestamp_start();

#ifdef DEBUG
	printf("input matrix is:\n");
	for (i = 0; i < BLOCK_SIZE; i++) {
		base = i * BLOCK_SIZE;
		for (j = 0; j < BLOCK_SIZE; j++) {
			printf("%d ", (int) block_s[base + j]);
		}
		printf("\n");
	}
#endif

	printf("\nRunning DCT...\n");

	time_start = vbx_timestamp();
	for( y = 0; y < IMG_DOWN; y++ ) {
		for( x = 0; x < IMG_ACROSS; x++ ) {
			vbx_mtx_fdct_scalar( block_s, (dt*)cs, image, x/*start_x*/, y/*start_y*/, NUM_TILE_X, NUM_TILE_Y );
		}
	}
	time_stop = vbx_timestamp();

	cycles = time_stop - time_start;
	scalar_time = (double) cycles;
	scalar_time /= (double) vbx_timestamp_freq();
	scalar_time *= 1000.0;		//ms
	vbx_timestamp_t mxp_cycles = vbx_mxp_cycles(cycles);

	printf("%dx%d Block Size\n", BLOCK_SIZE, BLOCK_SIZE);
	printf("Finished, scalar CPU took %0.3f ms \n", scalar_time);
	printf(" CPU Cycles: %d\n", (int) mxp_cycles);
	printf(" CPU Cycles per block: %f\n", mxp_cycles / ((double) (NUM_BLOCKS)));

	vbx_sync(); // wait for image to be prefetched

	time_start = vbx_timestamp();
	for( y = 0; y < IMG_DOWN; y++ ) {
		for( x = 0; x < IMG_ACROSS; x++ ) {
			vbx_mtx_fdct( v, block_v, image, x/*start_x*/, y/*start_y*/, IMG_ACROSS-1,IMG_DOWN-1,NUM_TILE_X, NUM_TILE_Y );
		}
	}
	time_stop = vbx_timestamp();

	cycles = time_stop - time_start;
	vbx_time = (double) cycles;
	vbx_time /= (double) vbx_timestamp_freq();
	vbx_time *= 1000.0;			//ms
	mxp_cycles = vbx_mxp_cycles(cycles);

	printf("Finished, MXP took %0.3f ms \n", vbx_time);
	printf(" CPU Cycles: %d\n", (int) mxp_cycles);
	printf(" CPU Cycles per block: %f\n", mxp_cycles / ((double) (NUM_BLOCKS)));
	printf(" Speedup: %f\n", scalar_time / vbx_time);

	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	double vbx_mbps = (double) (NUM_BLOCKS) * 1000 / vbx_time;	// blocks per second
	printf("V%d@%dMHz: %dx%d tile, %dx%d blocks, %f blocks/s, %f megapixel/s\n",
	       this_mxp->vector_lanes, this_mxp->core_freq / 1000000, 
	       NUM_TILE_Y, NUM_TILE_X, 
	       BLOCK_SIZE, BLOCK_SIZE,
	       vbx_mbps, (vbx_mbps * DCT_SIZE) / 1000000);

	printf("\nChecking results...\n");

	wrong = 0;
	for (block_num = 0; block_num < NUM_BLOCKS; block_num++) {
		for (i = 0; i < BLOCK_SIZE; i++) {
			base = i * BLOCK_SIZE;
			for (j = 0; j < BLOCK_SIZE; j++) {
				if (block_s[block_num * DCT_SIZE + base + j] != block_v[block_num * DCT_SIZE + base + j]) {
					if (wrong < 5) {
						printf("\nError at %d [%d,%d], result is %d, should be %d\n",
							   block_num, i, j, (int) block_v[block_num * DCT_SIZE + base + j],
							   (int) block_s[block_num * DCT_SIZE + base + j]);
					}
					wrong++;
				}
			}
		}
	}

	printf("wrong is %d\n\n", wrong);
	total_errors += wrong;

	free(block_s);
	vbx_shared_free(block_v);
	vbx_shared_free(coeff_v);

	vbx_mtx_fdct_free( v );

	VBX_TEST_END(total_errors);

	return (0);
}
Example #27
File: test.c Project: 8l/mxp
int test_lbp_ci(unsigned short* img, int width, int height)
{

    vbx_uhalf_t* v_a1  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_b1  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_1h = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));

    vbx_uhalf_t* v_a2  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_b2  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_2h  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));

    vbx_uhalf_t* v_a4  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_b4  = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));
    vbx_uhalf_t* v_4h = (vbx_uhalf_t*)vbx_sp_malloc(width*sizeof(vbx_uhalf_t));

    vbx_ubyte_t* v_1b  = (vbx_ubyte_t*)vbx_sp_malloc(width*sizeof(vbx_ubyte_t));
    vbx_ubyte_t* v_2b  = (vbx_ubyte_t*)vbx_sp_malloc(width*sizeof(vbx_ubyte_t));
    vbx_ubyte_t* v_4b  = (vbx_ubyte_t*)vbx_sp_malloc(width*sizeof(vbx_ubyte_t));

    unsigned short* lbp1h = (unsigned short*)vbx_shared_malloc(width*sizeof(unsigned short));
    unsigned short* lbp2h = (unsigned short*)vbx_shared_malloc(width*sizeof(unsigned short));
    unsigned short* lbp4h = (unsigned short*)vbx_shared_malloc(width*sizeof(unsigned short));

    unsigned char* lbp1b = (unsigned char*)vbx_shared_malloc(width*sizeof(unsigned char));
    unsigned char* lbp2b = (unsigned char*)vbx_shared_malloc(width*sizeof(unsigned char));
    unsigned char* lbp4b = (unsigned char*)vbx_shared_malloc(width*sizeof(unsigned char));

    img = img + width;

    vbx_dma_to_vector(v_a1, img,         width*sizeof(unsigned short));
    vbx_dma_to_vector(v_b1, img + width, width*sizeof(unsigned short));
    vbx_dma_to_vector(v_a2, img,         width*sizeof(unsigned short));
    vbx_dma_to_vector(v_b2, img + width, width*sizeof(unsigned short));
    vbx_dma_to_vector(v_a4, img,         width*sizeof(unsigned short));
    vbx_dma_to_vector(v_b4, img + width, width*sizeof(unsigned short));
    vbx_sync();

    int i;
    int m = 48;
    for(i=0; i<m; i++){
        v_a1[i] = 0;
        v_b1[i] = 0;
        v_a2[i] = 0;
        v_b2[i] = 0;
        v_a4[i] = 0;
        v_b4[i] = 0;
    }
    int n = 12;
    int src_a1[] = {0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int src_b1[] = {0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    int src_a2[] = {0, 0, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int src_b2[] = {0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    int src_a4[] = {0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0};
    int src_b4[] = {0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0};
    
    for(i=0; i<16; i++){
        v_a1[i] = src_a1[i];
        v_b1[i] = src_b1[i];
        v_a2[i] = src_a2[i];
        v_b2[i] = src_b2[i];
        v_a4[i] = src_a4[i];
        v_b4[i] = src_b4[i];
    }

    vbx_set_vl(width);
    vbx(VVHU, VCUSTOM1, v_1h, v_a1, v_b1); 
    vbx(VVHU, VCUSTOM2, v_2h, v_a2, v_b2); 
    vbx(VVHU, VCUSTOM3, v_4h, v_a4, v_b4); 
    vbx(VVHB, VADD, v_1b, v_1h, ((vbx_byte_t*)v_1h) + 1);
    vbx(VVHB, VADD, v_2b, v_2h, ((vbx_byte_t*)v_2h) + 1);
    vbx(VVHB, VADD, v_4b, v_4h, ((vbx_byte_t*)v_4h) + 1);
    vbx_dma_to_host(lbp1h, v_1h, width*sizeof(unsigned short));
    vbx_dma_to_host(lbp2h, v_2h, width*sizeof(unsigned short));
    vbx_dma_to_host(lbp4h, v_4h, width*sizeof(unsigned short));
    vbx_dma_to_host(lbp1b, v_1b, width*sizeof(unsigned char));
    vbx_dma_to_host(lbp2b, v_2b, width*sizeof(unsigned char));
    vbx_dma_to_host(lbp4b, v_4b, width*sizeof(unsigned char));
    vbx_sync();

    test_print_array_half(v_a1, n);
    test_print_array_half(v_b1, n);
    test_print_hex_array_half(lbp1h, n);
    test_print_hex_array_byte(lbp1b, n);

    test_print_array_half(v_a2, n);
    test_print_array_half(v_b2, n);
    test_print_hex_array_half(lbp2h, n);
    test_print_hex_array_byte(lbp2b, n);

    test_print_array_half(v_a4, n);
    test_print_array_half(v_b4, n);
    test_print_hex_array_half(lbp4h, n);
    test_print_hex_array_byte(lbp4b, n);

    vbx_sp_free();
    vbx_shared_free(lbp1h);
    vbx_shared_free(lbp2h);
    vbx_shared_free(lbp4h);
    vbx_shared_free(lbp1b);
    vbx_shared_free(lbp2b);
    vbx_shared_free(lbp4b);
    return 0;
}
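
A minimal caller sketch for the routine above; the image size and fill pattern are illustrative assumptions, chosen only so that the two rows read at img+width and img+2*width exist:

/* Sketch only: drives test_lbp_ci() with a small synthetic image.
 * test_lbp_ci() reads the two rows starting at img+width, so height must be >= 3. */
int run_lbp_ci_test( void )
{
    const int width = 640, height = 3;
    unsigned short* img =
        (unsigned short*)vbx_shared_malloc(width*height*sizeof(unsigned short));
    int i, rc;
    for (i = 0; i < width*height; i++) {
        img[i] = (unsigned short)(i & 0xff);   /* arbitrary test pattern */
    }
    rc = test_lbp_ci(img, width, height);
    vbx_shared_free(img);
    return rc;
}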
Example #28
void vbx_mtx_fdct( vbx_mtx_fdct_t *v, dt *block_v, dt *image,
	int start_x, int start_y, int end_x, int end_y,int num_tile_x, int num_tile_y )
{
//	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
//	const int VBX_SCRATCHPAD_SIZE = this_mxp->scratchpad_size;
	const int BIG_TILE_SIZE = num_tile_x * num_tile_y * DCT_SIZE;

	int next_x=start_x+1;
	int next_y=start_y;
	int get_next=1;
	if( start_x == end_x   &&   start_y == end_y ) {
		get_next=0;
	}
	if( start_x == end_x ) {
		next_x = 0;
		next_y++;
	} 

	const vbx_half_t *vimageDMA = v->vimage[!v->db]; // dma
//	const vbx_half_t *vblockDMA = v->vblock[!v->db]; // dma // never used directly 

	const vbx_half_t *vimageVPU = v->vimage[ v->db]; // active
	const vbx_half_t *vblockVPU = v->vblock[ v->db]; // active

	const vbx_half_t *vblockTMP = v->vblock[ 2    ]; // temp

	const vbx_half_t *vcoeff    = v->vcoeff;
	const vbx_half_t *vprods    = v->vprods;
	const vbx_half_t *vaccum    = v->vaccum;
	const vbx_half_t *vflags    = v->vflags;

#if DMA
	// First, prefetch the next chunk of the next image for a future call to fdct_tile()
#if NUM_TILE_Y > 1
	if( get_next ) // get row 0
		getBigTileImageY( vimageDMA,
		        image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE, 0 );
#else
	if( get_next ) // get row 0
		getBigTileImage( vimageDMA,
		        image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE, 0 );
#endif
#endif

	int r;
	for( r=0; r < BLOCK_SIZE; r++ ) {
		// perform multiply of the whole BIG_TILE with row 'r' of the image matrix (earlier versions switched DCT matrices here)
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE );                                                                                              // for the length of tiled rows
		vbx_set_2D( BLOCK_SIZE, NUM_TILE_X*BLOCK_SIZE*sizeof(dt),                                    0, NUM_TILE_X*BLOCK_SIZE*sizeof(dt) ); // for all rows of tiled coefficients
		vbx_set_3D( NUM_TILE_Y, NUM_TILE_X * DCT_SIZE*sizeof(dt),     NUM_TILE_X * DCT_SIZE*sizeof(dt),                               0  ); // for all groups Y
		vbx_3D( VVH, VMUL,                                vprods, vimageVPU + r*NUM_TILE_X*BLOCK_SIZE,                            vcoeff); // for all 'columns' of tiled data

#if ACCUMULATE
		// accumulate the multiply operations
#if 0 & USE_ACCUM_FLAGS 
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE - (BLOCK_SIZE-1) );
		vbx( VVH, VADD, vaccum, vprods+0, vprods+1 );
		vbx_set_2D( BLOCK_SIZE-2, 0, 0, sizeof(dt) );
		vbx_2D( VVH, VADD, vaccum, vaccum, vprods+2 );
		vbx( VVH, VCMV_Z, vblockTMP+r, vaccum, vflags );
#elif BLOCK4
                //case DCT 4
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE - (BLOCK_SIZE-1) );
		vbx( VVH, VADD, vaccum, vprods, vprods+1 );
		vbx( VVH, VADD, vaccum, vaccum, vprods+2 );
		vbx( VVH, VADD, vaccum, vaccum, vprods+3 );
		vbx( VVH, VCMV_Z, vblockTMP+r, vaccum, vflags );
#else
                //correct?
		vbx_set_vl( BLOCK_SIZE );
		vbx_set_2D( BLOCK_SIZE,   NUM_TILE_X*BLOCK_SIZE*sizeof(dt), NUM_TILE_X*BLOCK_SIZE*sizeof(dt), NUM_TILE_X*BLOCK_SIZE*sizeof(dt) );
		vbx_set_3D( NUM_TILE_X,   BLOCK_SIZE*sizeof(dt),            BLOCK_SIZE*sizeof(dt),            BLOCK_SIZE*sizeof(dt) );
#if NUM_TILE_Y == 1
		vbx_acc_3D( VVH, VOR,   vblockTMP + r,      vprods ,  vprods );
#else
		int y; 
		for (y=0; y< NUM_TILE_Y; y++){
			vbx_acc_3D( VVH, VOR,   vblockTMP + r + y*NUM_TILE_X*DCT_SIZE,      vprods+ y*NUM_TILE_X*DCT_SIZE,  vprods+ y*NUM_TILE_X*DCT_SIZE );
		}
#endif
#endif
#endif

#if 0
// don't do DMA reads here yet; a DMA write may still be in progress, give it a chance to finish
#if DMA
		// every other iteration, prefetch the next row of the next image
		// NB: with 2D DMA, we could issue this as a single DMA request at the top of the file
		// instead, we must intersperse these 1D DMA strips to ensure they don't block the instruction queue
#if NUM_TILE_Y > 1
		if( !(r&1) && get_next )
			getBigTileImageY( vimageDMA,
			                  image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE,
			                  (1+((r-1)>>1)) ); //BLOCK_SIZE/2 rows added
#else
		if( !(r&1) && get_next )
			getBigTileImage( vimageDMA,
			                 image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE,
			                 (1+((r-1)>>1)) ); //BLOCK_SIZE/2 rows added
#endif
#endif
#endif
	}

	vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE );
	vbx( SVH, VSHR, vblockTMP, SHIFT_AMOUNT, vblockTMP );

	// now do the transposed version

	for( r=0; r < BLOCK_SIZE; r++ ) {
		// perform multiply of the whole BIG_TILE with row 'r' of the image matrix (earlier versions switched DCT matrices here)
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE );                                                                                              // for the length of tiled rows
		vbx_set_2D( BLOCK_SIZE, NUM_TILE_X * BLOCK_SIZE*sizeof(dt),     NUM_TILE_X * BLOCK_SIZE*sizeof(dt),                            0 ); // for all 'columns' of tiled data 
		vbx_set_3D( NUM_TILE_Y, NUM_TILE_X * DCT_SIZE*sizeof(dt),       NUM_TILE_X * DCT_SIZE*sizeof(dt),                              0 ); // for all groups Y
		vbx_3D( VVH, VMUL,                             vprods,                        vblockTMP,  vcoeff + r*NUM_TILE_X*BLOCK_SIZE); // for all rows of tiled coefficients

#if ACCUMULATE
		// accumulate the multiply operations
#if 0 & USE_ACCUM_FLAGS
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE - (BLOCK_SIZE-1) );
		vbx( VVH, VADD, vaccum, vprods+0, vprods+1 );
		vbx_set_2D( BLOCK_SIZE-2, 0, 0, sizeof(dt) );
		vbx_2D( VVH, VADD, vaccum, vaccum, vprods+2 );
		vbx( VVH, VCMV_Z, vblockVPU+r, vaccum,   vflags );

#elif BLOCK4
		//case DCT 4
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE - (BLOCK_SIZE-1) );
		vbx( VVH, VADD, vaccum, vprods, vprods+1 );
		vbx( VVH, VADD, vaccum, vaccum, vprods+2 );
		vbx( VVH, VADD, vaccum, vaccum, vprods+3 );
		//vbx( VVH, VCMV_Z, vblockVPU+r, vaccum, vflags );
		vbx_set_vl( NUM_TILE_X * BLOCK_SIZE - (BLOCK_SIZE-1) );                    // for the length of a tiled row
		vbx_set_2D( BLOCK_SIZE, 1*sizeof(dt), NUM_TILE_X*BLOCK_SIZE*sizeof(dt), 0);// for all tiled rows 
#if NUM_TILE_Y == 1
		vbx_2D(VVH, VCMV_Z, vblockVPU+r*NUM_TILE_X*BLOCK_SIZE, vaccum, vflags  );  // 
#else
		int y;
		for (y=0; y< NUM_TILE_Y; y++){
			vbx_2D(VVH, VCMV_Z, vblockVPU+r*NUM_TILE_X*BLOCK_SIZE + y*NUM_TILE_X*DCT_SIZE , vaccum+y*NUM_TILE_X*DCT_SIZE, vflags  );  // 
		}
#endif
#else
		//correct?
		vbx_set_vl( BLOCK_SIZE );                                                                                              // for the length of a row
		vbx_set_2D( BLOCK_SIZE,   sizeof(dt), NUM_TILE_X*BLOCK_SIZE*sizeof(dt), NUM_TILE_X*BLOCK_SIZE*sizeof(dt) );            // for all rows in that block
		vbx_set_3D( NUM_TILE_X,   BLOCK_SIZE*sizeof(dt),            BLOCK_SIZE*sizeof(dt),            BLOCK_SIZE*sizeof(dt) ); // for all tiled blocks horizontally(x)
#if NUM_TILE_Y == 1
		vbx_acc_3D( VVH, VOR,   vblockVPU + r*NUM_TILE_X*BLOCK_SIZE ,    vprods ,  vprods );
#else
		int y;
		for (y=0; y< NUM_TILE_Y; y++){ 
			vbx_acc_3D( VVH, VOR,   vblockVPU + r*NUM_TILE_X*BLOCK_SIZE + y*NUM_TILE_X*DCT_SIZE,      vprods+ y*NUM_TILE_X*DCT_SIZE,  vprods+ y*NUM_TILE_X*DCT_SIZE );
		}
#endif
#endif
#endif

#if DMA
		// every other iteration, prefetch the next row of the next image
		// NB: with 2D DMA, we could issue this as a single DMA request at the top of the file
		// instead, we must intersperse these 1D DMA strips to ensure they don't block the instruction queue
#if NUM_TILE_Y > 1
		//if( !(r&1) && r<(BLOCK_SIZE-1)  && get_next )
		if( get_next )
			getBigTileImageY( 
			                  vimageDMA,
			                  image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE,
			                  r );
			                  //(BLOCK_SIZE/2 +1+((r-1)>>1)) ); // BLOCK/2 -1 rows
#else
		//if( !(r&1) && r<(BLOCK_SIZE-1)  && get_next )
		if( get_next )
			getBigTileImage( vimageDMA,
			                 image+next_x*NUM_TILE_X*BLOCK_SIZE+next_y*IMAGE_WIDTH*NUM_TILE_Y*BLOCK_SIZE,
			                 r );
			                 //(BLOCK_SIZE/2 +1+((r-1)>>1)) ); // BLOCK/2 -1 rows
#endif
#endif
	}

	vbx_set_vl( NUM_TILE_X * BLOCK_SIZE * NUM_TILE_Y * BLOCK_SIZE );
	vbx( SVH, VSHR, vblockVPU, SHIFT_AMOUNT, vblockVPU );
#if DMA2
	// Write result back to memory as one big block
	vbx_dma_to_host( block_v, vblockVPU, BIG_TILE_SIZE*sizeof(dt) );
#endif 

	v->db = !v->db;
#ifdef DEBUG 
	{
		vbx_sync();
		int i,j;
		printf("%d\n", !db);
		for(i=0;i<BLOCK_SIZE*NUM_TILE_Y;i++){
			for(j=0;j<BLOCK_SIZE*NUM_TILE_X;j++){
				printf(" %4d", block_v[i*BLOCK_SIZE*NUM_TILE_X+j]);
			}
			printf("\n");
		}
	}
#endif
}
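
Functionally, the two passes above compute a separable fixed-point 2-D DCT on every BLOCK_SIZE x BLOCK_SIZE block of the big tile: multiply-accumulate against the coefficient matrix, shift right by SHIFT_AMOUNT, then repeat against the transposed coefficients. A scalar sketch of one block under that reading; the helper name and the plain C loops are illustrative and not part of the MXP code:

/* Sketch only: scalar reference for one BLOCK_SIZE x BLOCK_SIZE block.
 * coeff is assumed to hold the same fixed-point DCT matrix loaded into vcoeff. */
static void fdct_block_scalar( dt *out, const dt *in, const dt *coeff )
{
	dt tmp[BLOCK_SIZE*BLOCK_SIZE];
	int i, j, k, acc;
	/* first pass: tmp = (coeff * in) >> SHIFT_AMOUNT */
	for( i = 0; i < BLOCK_SIZE; i++ ) {
		for( j = 0; j < BLOCK_SIZE; j++ ) {
			acc = 0;
			for( k = 0; k < BLOCK_SIZE; k++ )
				acc += coeff[i*BLOCK_SIZE+k] * in[k*BLOCK_SIZE+j];
			tmp[i*BLOCK_SIZE+j] = (dt)(acc >> SHIFT_AMOUNT);
		}
	}
	/* second pass: out = (tmp * coeff^T) >> SHIFT_AMOUNT */
	for( i = 0; i < BLOCK_SIZE; i++ ) {
		for( j = 0; j < BLOCK_SIZE; j++ ) {
			acc = 0;
			for( k = 0; k < BLOCK_SIZE; k++ )
				acc += tmp[i*BLOCK_SIZE+k] * coeff[j*BLOCK_SIZE+k];
			out[i*BLOCK_SIZE+j] = (dt)(acc >> SHIFT_AMOUNT);
		}
	}
}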
Example #29
File: test.c Project: 8l/mxp
int compare_vbx_lut_to_vbx_lut_ci(int stage, int max_print_errors)
{
	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	int vci_lanes = this_mxp->vcustom0_lanes;
    int sz = this_mxp->scratchpad_size/(16*sizeof(vbx_ubyte_t));

    vbx_byte_t* v_pass = (vbx_byte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_pattern = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_lutc = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_group = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_sel = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_byte_t));
    vbx_ubyte_t* v_lut = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_word_t));
    vbx_ubyte_t* v_idx = (vbx_ubyte_t*)vbx_sp_malloc(sz*sizeof(vbx_word_t));
    if(v_idx == NULL) {
        printf("failed to allocate in compare_vbx_lut_to_vbx_lut_ci\n");
        vbx_sp_free();
        return -1;   // bail out instead of dereferencing a failed allocation
    }

    unsigned char* lut = (unsigned char*)vbx_shared_malloc(sz*sizeof(unsigned char));
    unsigned char* lut_c = (unsigned char*)vbx_shared_malloc(sz*sizeof(unsigned char));

    int f, n, s, errors = 0;
    for (n = 0; n < sz; n++) {
        v_pattern[n] = (n & 0xff);
    }

    for (f = 0; f < face_lbp[stage].count; f++) {
        lbp_feat_t feat = face_lbp[stage].feats[f];

        vbx_set_vl(sz);
        int total = f;
        s = 0;
        while(s < stage){
            total += face_lbp[s].count;
            s++;
        }

        if(total < 256) {
            vbx(SVBU, VLBPLUT, v_lutc, total, v_pattern);
        } else {
            vbx(SVBS, VLBPLUT, v_lutc, total-256, v_pattern);
        }

        vbx(SVB, VMOV, v_pass, feat.fail, 0);
        /* check if pattern is in lut */
        vbx(SVBU, VSHR, v_group, 5, v_pattern);
        for (n = 0; n < 8; n++) {
            vbx(SVB, VADD, v_sel, -n, v_group);
            vbx(SVBW, VCMV_Z, v_lut, feat.lut[n], v_sel);
        }

        vbx(SVBWU, VAND, v_idx, 0x1f, v_pattern);
        vbx(VVWB, VSHR, v_lut, v_idx, v_lut);
        vbx(SVB, VAND, v_lut, 1, v_lut);
        vbx(SVB, VCMV_LEZ, v_pass, feat.pass, v_lut);

        vbx_dma_to_host(lut_c, v_lutc, sz*sizeof(unsigned char));
        vbx_dma_to_host(lut, v_pass, sz*sizeof(unsigned char));
        vbx_sync();

        errors += match_array_byte(lut, lut_c, "custom_lut", sz, 1, 0, max_print_errors, 0, 0);

    }
    vbx_sp_free();
    vbx_shared_free(lut);
    vbx_shared_free(lut_c);
    return errors;
}
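
The vector sequence above picks one 32-bit word of the feature's LUT with the pattern's upper three bits, tests the bit selected by the lower five bits, and maps a clear bit to feat.pass and a set bit to feat.fail. A scalar sketch of that lookup, assuming the lbp_feat_t fields used above; the helper itself is illustrative only:

/* Sketch only: scalar equivalent of the per-pattern LUT test above. */
static int lbp_lut_scalar(const lbp_feat_t* feat, unsigned char pattern)
{
    unsigned int group = pattern >> 5;                             /* which 32-bit LUT word */
    unsigned int bit = (feat->lut[group] >> (pattern & 0x1f)) & 1; /* bit within that word */
    return bit ? feat->fail : feat->pass;                          /* clear bit selects the pass value */
}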
Example #30
int vbw_mtx_xp_ext(vbx_mm_t *out, vbx_mm_t *in, const int INROWS, const int INCOLS )
{
	typedef vbx_mm_t vbx_sp_t;

	int elements = INROWS * INCOLS;

	if(elements < SCALAR_THRESHOLD) {
		vbx_sync();  // in case the input is still waiting on a DMA transfer
		int i,j;
		for(i = 0; i < INROWS; i++) {
			for(j = 0; j < INCOLS; j++) {
				out[j*INROWS+i] = in[i*INCOLS+j];
			}
		}
		return VBW_SUCCESS;
	}

	vbx_sp_push();

	vbx_sp_t *v_in;
	vbx_sp_t *v_out;

	int tile_height     = 0;
	int tile_width      = 0;
	int prev_tile_width = 0;
	int tile_y          = 0;
	int tile_x          = 0;

	vbx_mxp_t *this_mxp = VBX_GET_THIS_MXP();
	int SP_WIDTH_B = this_mxp->scratchpad_alignment_bytes;
	int SP_SIZE = vbx_sp_getfree();
	int max_sp_elements   = vbx_sp_getfree() / sizeof(vbx_sp_t);
	int max_tile_elements = VBX_PAD_DN( SP_SIZE/2, SP_WIDTH_B ) / sizeof(vbx_sp_t);


	if( INROWS == 1 || INCOLS == 1 ) {           // 1D transpose becomes a simple copy operation
		if( elements <= max_sp_elements ) {      // We can use the whole scratchpad for this
			v_in = (vbx_sp_t*)vbx_sp_malloc( elements * sizeof(vbx_sp_t) );
			vbx_dma_to_vector( v_in, in, elements*sizeof(vbx_mm_t) );
			v_out = v_in;
			vbx_dma_to_host( out, v_out, elements*sizeof(vbx_mm_t) );
		} else {                                 // To test this, you'll need a very large 1D matrix (or a small SP)
			tile_width = max_sp_elements;
			v_in = (vbx_sp_t*)vbx_sp_malloc( tile_width * sizeof(vbx_sp_t) );
			for (tile_x = 0; tile_x < elements; tile_x += tile_width) {
				if( tile_x + tile_width > elements) tile_width = elements - tile_x;
				vbx_dma_to_vector( v_in, in + tile_x, tile_width*sizeof(vbx_mm_t) );
				v_out = v_in;
				vbx_dma_to_host( out+tile_x, v_out, tile_width*sizeof(vbx_mm_t) );
			}
		}
	} else if( elements < max_tile_elements ) {  // Matrix is small enough to handle entirely in SP
		v_in  = (vbx_sp_t*)vbx_sp_malloc( elements * sizeof(vbx_sp_t) );
		v_out = (vbx_sp_t*)vbx_sp_malloc( elements * sizeof(vbx_sp_t) );

		vbx_dma_to_vector( v_in, in, elements*sizeof(vbx_mm_t) );

		vbw_mtx_xp(v_out,v_in,INROWS,INCOLS);

		vbx_dma_to_host( out, v_out, elements*sizeof(vbx_mm_t) );
	} else {                                     // At this point we know at least one full tile will be needed
		#define QUICK_A_LANES_THRESHOLD 8        // Use merge transpose if there are at least this many lanes
		#define QUICK_A_TILE_WIDTH 128
		#define QUICK_A_TILE_ELEMENTS (QUICK_A_TILE_WIDTH*QUICK_A_TILE_WIDTH)
		#define QUICK_A_VF_ELEMENTS (QUICK_A_TILE_ELEMENTS/2)
		#define QUICK_A_REQ_ELEMENTS (2*VBX_PAD_UP(QUICK_A_TILE_ELEMENTS,SP_WIDTH_B/sizeof(vbx_sp_t)) + VBX_PAD_UP(QUICK_A_VF_ELEMENTS,sizeof(vbx_sp_t)))

		#define QUICK_B_LANES_THRESHOLD 16        // Use smaller merge transpose tile only if there are a lot of lanes
		#define QUICK_B_TILE_WIDTH 64             //     and only if larger tile A size cannot be used.
		#define QUICK_B_TILE_ELEMENTS (QUICK_B_TILE_WIDTH*QUICK_B_TILE_WIDTH)
		#define QUICK_B_VF_ELEMENTS (QUICK_B_TILE_ELEMENTS/2)
		#define QUICK_B_REQ_ELEMENTS (2*VBX_PAD_UP(QUICK_B_TILE_ELEMENTS,SP_WIDTH_B/sizeof(vbx_sp_t)) + VBX_PAD_UP(QUICK_B_VF_ELEMENTS,sizeof(vbx_sp_t)))

		int NUM_LANES = this_mxp->vector_lanes;
		int DMA_BYTES = this_mxp->dma_alignment_bytes;
		int min_tile_dim = DMA_BYTES / sizeof(vbx_sp_t);

		vbx_sp_t *v_out_sel;
		vbx_sp_t *vf = 0;

		if( NUM_LANES >= QUICK_A_LANES_THRESHOLD       // Check for appropriate conditions to use merge transpose tiles
					&& INCOLS >= QUICK_A_TILE_WIDTH
					&& INROWS >= QUICK_A_TILE_WIDTH
			&& (unsigned)max_sp_elements >= QUICK_A_REQ_ELEMENTS ) {
			tile_width = tile_height = QUICK_A_TILE_WIDTH;
			vf = (vbx_sp_t *)vbx_sp_malloc( QUICK_A_VF_ELEMENTS * sizeof(vbx_sp_t));
		} else if( NUM_LANES >= QUICK_B_LANES_THRESHOLD
					&& INCOLS >= QUICK_B_TILE_WIDTH
					&& INROWS >= QUICK_B_TILE_WIDTH
			&& (unsigned)max_sp_elements >= QUICK_B_REQ_ELEMENTS ) {
			tile_width = tile_height = QUICK_B_TILE_WIDTH;
			vf = (vbx_sp_t *)vbx_sp_malloc( QUICK_B_VF_ELEMENTS * sizeof(vbx_sp_t));
		} else {
			findTileSize( &tile_height, &tile_width, INROWS, INCOLS, max_tile_elements, min_tile_dim );
		}

		prev_tile_width = tile_width;

		v_in  = (vbx_sp_t*)vbx_sp_malloc( tile_height*tile_width * sizeof(vbx_sp_t) );
		v_out = (vbx_sp_t*)vbx_sp_malloc( tile_height*tile_width * sizeof(vbx_sp_t) );


		if( v_out==NULL ) {
			vbx_sp_pop();
			return VBW_ERROR_SP_ALLOC_FAILED;
		}

		vbx_sp_t *v[2] = { v_in, v_out };

		tile_y = 0;                              // Reset y position for new col
		while( tile_y < INROWS ) {
			vbx_set_2D( tile_width, tile_height*sizeof(vbx_sp_t), sizeof(vbx_sp_t), sizeof(vbx_sp_t) );
			vbx_set_3D( tile_height, sizeof(vbx_sp_t), tile_width*sizeof(vbx_sp_t), tile_width*sizeof(vbx_sp_t) );
			tile_x = 0;                          // Reset x position for new row
			while( tile_x < INCOLS ) {

				vbx_dma_to_vector_2D(
						v_in,
						in+(tile_y*INCOLS)+tile_x,
						tile_width*sizeof(vbx_mm_t),
						tile_height,
						tile_width*sizeof(vbx_sp_t),
						INCOLS*sizeof(vbx_mm_t) );

				v_out_sel = v_out;                         // select v_out as default vector to DMA to MM

				/* *** merge transpose (matrix must be square and a power of 2 wide) *** */
				if( vf && tile_width == tile_height
							&& (tile_width==QUICK_A_TILE_WIDTH || tile_width==QUICK_B_TILE_WIDTH) ) {
					int src = 0;
					int n;
					for( n=1; n<tile_width; n *= 2 ) {     // can't do 1st iteration until entire tile is DMA'd in
						const int nn = 2*n;

						// copy the destination matrix
						vbx_set_vl( tile_width*tile_width );    // use v_in & v_out as working matrices (clobber v_in)
						vbxx(  VMOV, v[!src], v[src]);

						// do the work
						vbx_set_vl( n*tile_width );
						vbxx( VAND, vf, n, (vbx_enum_t*)0 );           // mask for merging: 0101010... then 00110011...
						vbx_set_2D( tile_width/nn, nn*tile_width*sizeof(vbx_sp_t), nn*tile_width*sizeof(vbx_sp_t), 0 );
						vbxx_2D( VCMV_Z, v[!src]+n*tile_width, v[src]+n           , vf );
						vbxx_2D( VCMV_Z, v[!src]+n,            v[src]+n*tile_width, vf );

						src = !src;
					}

					v_out_sel = v[src];     // depending on the size of the mtx, the final result may be in v_in or v_out
				} else {
					vbx_set_vl( 1 );        // 2D and 3D will be set by the x and y edge conditions, even using merge
					vbxx_3D(VMOV, v_out, v_in );
				}

				vbx_dma_to_host_2D(
						out+(tile_x*INROWS)+tile_y,
						v_out_sel,
						tile_height*sizeof(vbx_mm_t),
						tile_width,
						INROWS*sizeof(vbx_mm_t),
						tile_height*sizeof(vbx_sp_t) );

				tile_x += tile_width;                 // Set up width for next tile
				if( tile_x + tile_width > INCOLS ) {  // Temporarily reduce tile width when reaching right edge of matrix
					tile_width = INCOLS - tile_x;
					vbx_set_2D( tile_width, tile_height*sizeof(vbx_sp_t), sizeof(vbx_sp_t), sizeof(vbx_sp_t) );
					vbx_set_3D( tile_height, sizeof(vbx_sp_t), tile_width*sizeof(vbx_sp_t), tile_width*sizeof(vbx_sp_t) );
				}
			}
			tile_y += tile_height;                    // Set up width and height for next row of tiles
			tile_width = prev_tile_width;             // Restore original tile width for next row of tiles

			/* *** Permanently reduce tile height when reaching bottom of matrix *** */
			tile_height = ( tile_y + tile_height > INROWS ) ? INROWS - tile_y : tile_height;
		}
	}
	vbx_sp_pop();
	vbx_sync();
	return VBW_SUCCESS;
}
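
A minimal usage sketch that transposes a matrix with the routine above and checks it against straightforward scalar indexing; the buffer sizes and the fill pattern are illustrative. Note that vbw_mtx_xp_ext() already calls vbx_sync() before returning, so the output can be read directly on the host.

/* Sketch only: transpose a rows x cols matrix and verify it element by element. */
int check_mtx_xp_ext( const int rows, const int cols )
{
	int i, j, errors = 0;
	vbx_mm_t *in  = (vbx_mm_t*)vbx_shared_malloc( rows*cols*sizeof(vbx_mm_t) );
	vbx_mm_t *out = (vbx_mm_t*)vbx_shared_malloc( rows*cols*sizeof(vbx_mm_t) );
	for( i = 0; i < rows*cols; i++ ) {
		in[i] = (vbx_mm_t)i;   /* arbitrary, easy-to-check contents */
	}

	vbw_mtx_xp_ext( out, in, rows, cols );

	for( i = 0; i < rows; i++ ) {
		for( j = 0; j < cols; j++ ) {
			if( out[j*rows+i] != in[i*cols+j] ) {
				errors++;
			}
		}
	}

	vbx_shared_free( in );
	vbx_shared_free( out );
	return errors;
}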