Example #1
boost::optional<double> SimpleClean::FindPeakAVX(const double *image, size_t width, size_t height, size_t& x, size_t& y, size_t startY, size_t endY, size_t horizontalBorder, size_t verticalBorder)
{
	double peakMax = std::numeric_limits<double>::lowest();  // lowest(), not min(): min() is the smallest *positive* double
	size_t peakIndex = 0;
	
	__m256d mPeakMax = _mm256_set1_pd(peakMax);
	
	size_t xiStart = horizontalBorder, xiEnd = width - horizontalBorder;
	size_t yiStart = std::max(startY, verticalBorder), yiEnd = std::min(endY, height - verticalBorder);
	if(xiEnd < xiStart) xiEnd = xiStart;
	if(yiEnd < yiStart) yiEnd = yiStart;
	
	for(size_t yi=yiStart; yi!=yiEnd; ++yi)
	{
		size_t index = yi*width + xiStart;
		const double* const endPtr = image + yi*width + xiEnd - 4;
		const double *i=image + index;
		for(; i<endPtr; i+=4)
		{
			__m256d val = _mm256_loadu_pd(i);
			if(AllowNegativeComponent) {
				__m256d negVal = _mm256_sub_pd(_mm256_set1_pd(0.0), val);
				val = _mm256_max_pd(val, negVal);
			}
			int mask = _mm256_movemask_pd(_mm256_cmp_pd(val, mPeakMax, _CMP_GT_OQ));
			if(mask != 0)
			{
				for(size_t di=0; di!=4; ++di)
				{
					double value = i[di];
					if(AllowNegativeComponent) value = std::fabs(value);
					if(value > peakMax)
					{
						peakIndex = index+di;
						peakMax = value;
						mPeakMax = _mm256_set1_pd(peakMax);
					}
				}
			}
			index+=4;
		}
		for(; i!=endPtr+4; ++i)
		{
			double value = *i;
			if(AllowNegativeComponent) value = std::fabs(value);
			if(value > peakMax)
			{
				peakIndex = index;
				peakMax = value;
			}
			++index;
		}
	}
	x = peakIndex % width;
	y = peakIndex / width;
	return image[x + y*width];
}
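The pattern to note here is the cheap vector pre-test: _mm256_cmp_pd produces a per-lane mask and _mm256_movemask_pd collapses it to four bits, so the scalar re-scan only runs when some lane actually beats the running maximum. A minimal self-contained sketch of that idiom (max_avx is a hypothetical name; for brevity the length is assumed to be a multiple of 4):

#include <immintrin.h>
#include <cstddef>

// Minimal sketch of the compare/movemask early-out used above.
static double max_avx(const double *data, size_t n)
{
	double best = data[0];
	__m256d vbest = _mm256_set1_pd(best);
	for (size_t i = 0; i < n; i += 4)
	{
		__m256d v = _mm256_loadu_pd(data + i);
		// One bit per lane; non-zero iff some lane exceeds the current best.
		if (_mm256_movemask_pd(_mm256_cmp_pd(v, vbest, _CMP_GT_OQ)) != 0)
		{
			for (size_t j = 0; j != 4; ++j)
				if (data[i + j] > best) best = data[i + j];
			vbest = _mm256_set1_pd(best);
		}
	}
	return best;
}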
Example #2
void calculate_fma_double (unsigned char * out, double X0, double Y0, double scale, unsigned YSTART, unsigned SX, unsigned SY)
{
    __m256d dd = _mm256_set1_pd (scale);
    __m256d XX0 = _mm256_set1_pd (X0);

    for (unsigned j = YSTART; j < SY; j++)	{
        __m256d y0 = _mm256_set1_pd (j*scale + Y0);
        for (unsigned i = 0; i < SX; i += 4)	{

            __m128i ind = _mm_setr_epi32 (i, i + 1, i + 2, i + 3);
            __m256d x0 = _mm256_fmadd_pd (dd, _mm256_cvtepi32_pd (ind), XX0);
            __m256d x = x0;
            __m256d y = y0;
            __m256i counts = _mm256_setzero_si256 ();
            __m256i cmp_mask = _mm256_set1_epi32 (0xFFFFFFFFu);

            for (unsigned n = 0; n < 255; n++)	{
                __m256d x2 = _mm256_mul_pd (x, x);
                __m256d y2 = _mm256_mul_pd (y, y);
                __m256d abs = _mm256_add_pd (x2, y2);
                __m256i cmp = _mm256_castpd_si256 (_mm256_cmp_pd (abs, _mm256_set1_pd (4), _CMP_LT_OS));
                cmp_mask = _mm256_and_si256 (cmp_mask, cmp);
                if (_mm256_testz_si256 (cmp_mask, cmp_mask)) {
                    break;
                }
                counts = _mm256_sub_epi64 (counts, cmp_mask);
                __m256d t = _mm256_add_pd (x, x);
                y = _mm256_fmadd_pd (t, y, y0);
                x = _mm256_add_pd (_mm256_sub_pd (x2, y2), x0);
            }
            __m256i result = _mm256_shuffle_epi8 (counts, _mm256_setr_epi8 (0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8));
            *(uint32_t*) out = _mm_extract_epi16 (_mm256_extracti128_si256 (result, 0), 0) | (_mm_extract_epi16 (_mm256_extracti128_si256 (result, 1), 0) << 16);
            out += 4;
        }
    }
}
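The inner loop above is the standard Mandelbrot escape-time iteration, with the cmp acting as the per-lane "still bounded" test and the mask subtraction as the iteration counter. A scalar reference for one pixel (mandel_scalar is a made-up name) makes the correspondence explicit:

// Scalar reference for one pixel; the AVX loop above runs four of these in lockstep.
static unsigned mandel_scalar(double x0, double y0)
{
    double x = x0, y = y0;
    unsigned n = 0;
    while (n < 255 && x * x + y * y < 4.0) {
        double x2 = x * x;
        double y2 = y * y;
        y = (x + x) * y + y0;   // matches _mm256_fmadd_pd(t, y, y0) with t = x + x
        x = x2 - y2 + x0;
        ++n;
    }
    return n;
}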
Example #3
double bst_compute_129_m256_maskstore_root_aligned( void*_bst_obj, double* p, double* q, size_t nn ) {
    segments_t* mem = (segments_t*) _bst_obj;
    int n, i, r, l_end, j, l_end_pre;
    double t, e_tmp;
    double* e = mem->e, *w = mem->w;
    int* root = mem->r;
    __m256d v_tmp;
    __m256d v00, v01, v02, v03;
    __m256d v10, v11, v12, v13;
    __m256d v20, v21, v22, v23;
    __m256d v30, v31, v32, v33;
    __m256i v_cur_roots;
    __m256 v_rootmask0, v_rootmask1;
    // initialization
    // mem->n = nn;
    n = nn; // n is signed on purpose: the index arithmetic below subtracts past zero

    int idx1, idx1_root;
    int idx2;
    int idx3, idx3_root;
    int pad_root, pad, pad_r;
    
    idx1      = ((int) mem->e_sz) - 1;
    idx1_root = ((int) mem->r_sz);
    // the convention is that at iteration i, idx1 points to the first element of line i+1
    e[idx1++] = q[n];
    
    // pad contains the padding for row i+1
    // for row n it's always 3
    pad = 3;
    pad_root = 7;
    for (i = n-1; i >= 0; --i) {
        idx1      -= 2*(n-i)+1 + pad;
        idx1_root -= 2*(n-i)+1 + pad_root;
        idx2       = idx1 + 1;
        e[idx1]    = q[i];
        w[idx1]    = q[i];
        for (j = i+1; j < n+1; ++j,++idx2) {
            e[idx2] = INFINITY;
            w[idx2] = w[idx2-1] + p[j-1] + q[j];
        }
        idx2     += pad; // padding of line i+1
        // idx2 now points to the first element of the next line

        idx3      = idx1;
        idx3_root = idx1_root;
        pad_r     = pad;
        for (r = i; r < n; ++r) {
            pad_r     = (pad_r+1)&3; // padding of line r+1
            idx1      = idx3;
            idx1_root = idx3_root;
            l_end     = idx2 + (n-r);
            // l_end points to the first entry after the current row
            e_tmp     = e[idx1++];
            idx1_root++;
            // peel scalar iterations until a multiple of 16 doubles is left
            // (16 = 4 vectors of 4 doubles per unrolled iteration)
            l_end_pre = idx2 + ((n-r)&15);
            for( ; (idx2 < l_end_pre) && (idx2 < l_end); ++idx2 ) {
                t = e_tmp + e[idx2] + w[idx1];
                if (t < e[idx1]) {
                    e[idx1] = t;
                    root[idx1_root] = r;
                }
                idx1++;
                idx1_root++;
            }
            
            v_tmp = _mm256_set_pd( e_tmp, e_tmp, e_tmp, e_tmp );
            // main vectorized loop: 4 AVX vectors of 4 doubles per iteration
            v_cur_roots = _mm256_set_epi32(r, r, r, r, r, r, r, r);
            for( ; idx2 < l_end; idx2 += 16 ) {
                v01 = _mm256_load_pd( &w[idx1   ] );
                v11 = _mm256_load_pd( &w[idx1+ 4] );
                v21 = _mm256_load_pd( &w[idx1+ 8] );
                v31 = _mm256_load_pd( &w[idx1+12] );

                v00 = _mm256_load_pd( &e[idx2   ] );
                v01 = _mm256_add_pd( v01, v_tmp ); 
                v10 = _mm256_load_pd( &e[idx2+ 4] );
                v11 = _mm256_add_pd( v11, v_tmp );
                v20 = _mm256_load_pd( &e[idx2+ 8] );
                v21 = _mm256_add_pd( v21, v_tmp );
                v30 = _mm256_load_pd( &e[idx2+12] );
                v31 = _mm256_add_pd( v31, v_tmp );

                v01 = _mm256_add_pd( v01, v00 );
                v03 = _mm256_load_pd( &e[idx1   ] );
                v11 = _mm256_add_pd( v11, v10 );
                v13 = _mm256_load_pd( &e[idx1+ 4] );
                v21 = _mm256_add_pd( v21, v20 );
                v23 = _mm256_load_pd( &e[idx1+ 8] );
                v31 = _mm256_add_pd( v31, v30 );
                v33 = _mm256_load_pd( &e[idx1+12] );

                v02 = _mm256_cmp_pd( v01, v03, _CMP_LT_OQ );
                v12 = _mm256_cmp_pd( v11, v13, _CMP_LT_OQ );
                v22 = _mm256_cmp_pd( v21, v23, _CMP_LT_OQ );
                v32 = _mm256_cmp_pd( v31, v33, _CMP_LT_OQ );

                _mm256_maskstore_pd( &e[idx1   ],
                        _mm256_castpd_si256( v02 ), v01 );
                _mm256_maskstore_pd( &e[idx1+ 4],
                        _mm256_castpd_si256( v12 ), v11 );

                v_rootmask0 = _mm256_insertf128_ps(
                        _mm256_castps128_ps256(
                            _mm256_cvtpd_ps(v02)),
                            _mm256_cvtpd_ps(v12) , 1
                    );

                _mm256_maskstore_pd( &e[idx1+ 8],
                        _mm256_castpd_si256( v22 ), v21 );
                _mm256_maskstore_pd( &e[idx1+12], 
                        _mm256_castpd_si256( v32 ), v31 );
                v_rootmask1 = _mm256_insertf128_ps(
                        _mm256_castps128_ps256(
                            _mm256_cvtpd_ps(v22)),
                            _mm256_cvtpd_ps(v32) , 1
                    );
                
                _mm256_maskstore_ps( (float*) &root[idx1_root    ],
                        _mm256_castps_si256( v_rootmask0 ),
                        _mm256_castsi256_ps( v_cur_roots ) );
                _mm256_maskstore_ps( (float*) &root[idx1_root + 8],
                        _mm256_castps_si256( v_rootmask1 ),
                        _mm256_castsi256_ps( v_cur_roots ) );
                idx1      += 16;
                idx1_root += 16;
            }
            idx2 += pad_r;
            idx3++;
            idx3_root++;
        }
        pad      = (pad     -1)&3;
        pad_root = (pad_root-1)&7;
    }
    // the index of the last item of the first row is ((n/4)+1)*4-1, due to the padding
    // if n is even, the total number of entries in the first
    // row of the table is odd, so we need padding
    return e[ ((n/4)+1)*4 - 1 ];
}
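The heart of the kernel's inner loop is a branch-free conditional minimum: the _CMP_LT_OQ result is reinterpreted as an integer mask and handed to _mm256_maskstore_pd so only the improving lanes are written back. A minimal sketch of that idiom (min_update4 is a made-up name; dst is assumed 32-byte aligned, as in the kernel):

#include <immintrin.h>

// dst[i] = src[i] where src[i] < dst[i]; other lanes are left untouched.
static void min_update4(double *dst, const double *src)
{
    __m256d d  = _mm256_load_pd(dst);
    __m256d s  = _mm256_loadu_pd(src);
    __m256d lt = _mm256_cmp_pd(s, d, _CMP_LT_OQ);          // all-ones where s < d
    _mm256_maskstore_pd(dst, _mm256_castpd_si256(lt), s);  // write only those lanes
}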
Example #4
// Main:
int main()
{
    int retval = 0;
    bool inited = false, buffersCreated = false, started = false;
    char *error = NULL;

    drv.sampleRate = 44100.0;

    // Initialize FX parameters:
    fx.f0_gain.init();
    fx.f1_compressor.init();

    // Set our own inputs:
    for (int i = 0; i < icr; ++i)
    {
        fx.f0_gain.input.gain[i] = _mm256_set1_pd(0);   // dB

        fx.f1_compressor.input.threshold[i] = _mm256_set1_pd(-30);  // dBFS
        fx.f1_compressor.input.attack[i]    = _mm256_set1_pd(1.0);  // msec
        fx.f1_compressor.input.release[i]   = _mm256_set1_pd(80);   // msec
        fx.f1_compressor.input.ratio[i]     = _mm256_set1_pd(0.25); // N:1
        fx.f1_compressor.input.gain[i]      = _mm256_set1_pd(6);    // dB
    }

    // Calculate input-dependent values:
    fx.f0_gain.recalc();
    fx.f1_compressor.recalc();

    // FX parameters are all set.

#ifdef NOT_LIVE
    // Test mode:

#if 0
    const auto t0 = mm256_if_then_else(_mm256_cmp_pd(_mm256_set1_pd(-1.0), _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), _mm256_set1_pd(-1.0));
    printvec_dB(t0);
    printf("\n\n");
    const auto p0 = mm256_if_then_else(_mm256_cmp_pd(_mm256_set1_pd(-1.0), _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), _mm256_set1_pd(1.0));
    printvec_dB(p0);
    printf("\n\n");
    const auto t1 = mm256_if_then_else(_mm256_cmp_pd(_mm256_set1_pd(0.0), _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), _mm256_set1_pd(-1.0));
    printvec_dB(t1);
    printf("\n\n");
    const auto p1 = mm256_if_then_else(_mm256_cmp_pd(_mm256_set1_pd(0.0), _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), _mm256_set1_pd(1.0));
    printvec_dB(p1);
    printf("\n\n");
    goto done;
#endif

    vec8_i32 in, out;
    long long c = 0LL;
    for (int i = 0; i < 20; ++i)
    {
        for (int n = 0; n < 48; ++n, ++c)
        {
            double s = sin(2.0 * 3.14159265358979323846 * (double)c / drv.sampleRate);
            int si = (int)(s * INT_MAX / 2);

            in = _mm256_set1_epi32(si);

            processEffects(in, out, 0);
        }

#if 1
        printf("samp:   ");
        printvec_samp(in);
        printf("\n");

        printf("input:  ");
        for (int n = 0; n < icr; ++n)
        {
            printvec_dB(fx.fi_monitor.levels[n]);
            if (n < icr - 1) printf(" ");
        }
        printf("\n");

        printf("gain:   ");
        for (int n = 0; n < icr; ++n)
        {
            printvec_dB(fx.f0_output.levels[n]);
            if (n < icr - 1) printf(" ");
        }
        printf("\n");

        printf("comp:   ");
        for (int n = 0; n < icr; ++n)
        {
            printvec_dB(fx.fo_monitor.levels[n]);
            if (n < icr - 1) printf(" ");
        }
        printf("\n");

        printf("samp:   ");
        printvec_samp(out);
        printf("\n\n");
#endif
    }
#else
    // ASIO live engine mode:
    if (!loadAsioDriver("UA-1000"))
    {
        error = "load failed.";
        goto err;
    }

    if (ASIOInit(&drv.driver) != ASE_OK)
        goto err;

    inited = true;

    if (ASIOGetChannels(&drv.inputChannels, &drv.outputChannels) != ASE_OK)
        goto err;

    printf("in: %d, out %d\n", drv.inputChannels, drv.outputChannels);

    if (ASIOGetBufferSize(&drv.minSize, &drv.maxSize, &drv.preferredSize, &drv.granularity) != ASE_OK)
        goto err;

    printf("min buf size: %d, preferred: %d, max buf size: %d\n", drv.minSize, drv.preferredSize, drv.maxSize);

    if (ASIOGetSampleRate(&drv.sampleRate) != ASE_OK)
        goto err;

    printf("rate: %f\n\n", drv.sampleRate);

    if (ASIOOutputReady() == ASE_OK)
        drv.postOutput = true;
    else
        drv.postOutput = false;

    // fill the bufferInfos from the start without a gap
    ASIOBufferInfo *info = drv.bufferInfos;

    // prepare inputs (not strictly required; the engine also runs with no inputs open)
    if (drv.inputChannels > kMaxInputChannels)
        drv.inputBuffers = kMaxInputChannels;
    else
        drv.inputBuffers = drv.inputChannels;
    for (int i = 0; i < drv.inputBuffers; i++, info++)
    {
        info->isInput = ASIOTrue;
        info->channelNum = i;
        info->buffers[0] = info->buffers[1] = 0;
    }

    // prepare outputs
    if (drv.outputChannels > kMaxOutputChannels)
        drv.outputBuffers = kMaxOutputChannels;
    else
        drv.outputBuffers = drv.outputChannels;
    for (int i = 0; i < drv.outputBuffers; i++, info++)
    {
        info->isInput = ASIOFalse;
        info->channelNum = i;
        info->buffers[0] = info->buffers[1] = 0;
    }

    asioCallbacks.asioMessage = asioMessage;
    asioCallbacks.bufferSwitch = bufferSwitch;
    asioCallbacks.bufferSwitchTimeInfo = bufferSwitchTimeInfo;

    // Create the buffers:
    if (ASIOCreateBuffers(drv.bufferInfos, drv.inputBuffers + drv.outputBuffers, drv.preferredSize, &asioCallbacks) != ASE_OK)
        goto err;
    else
        buffersCreated = true;

    // now get all the buffer details, sample word length, name, word clock group and activation
    for (int i = 0; i < drv.inputBuffers + drv.outputBuffers; i++)
    {
        drv.channelInfos[i].channel = drv.bufferInfos[i].channelNum;
        drv.channelInfos[i].isInput = drv.bufferInfos[i].isInput;
        if (ASIOGetChannelInfo(&drv.channelInfos[i]) != ASE_OK)
            goto err;

        //printf("%s[%2d].type = %d\n", drv.channelInfos[i].isInput ? "in " : "out", drv.channelInfos[i].channel, drv.channelInfos[i].type);
        if (drv.channelInfos[i].type != ASIOSTInt32LSB)
        {
            error = "Application assumes sample types of ASIOSTInt32LSB!";
            goto err;
        }
    }

    // get the input and output latencies
    // Latencies often are only valid after ASIOCreateBuffers()
    // (input latency is the age of the first sample in the currently returned audio block)
    // (output latency is the time the first sample in the currently returned audio block requires to get to the output)
    if (ASIOGetLatencies(&drv.inputLatency, &drv.outputLatency) != ASE_OK)
        goto err;

    printf ("latencies: input: %d, output: %d\n", drv.inputLatency, drv.outputLatency);

    // Start the engine:
    if (ASIOStart() != ASE_OK)
        goto err;
    else
        started = true;

    printf("Engine started.\n\n");
    const int total_time = 30;
    for (int i = 0; i < total_time; ++i)
    {
        printf("Engine running %2d.   \r", total_time - i);
        Sleep(1000);
    }
#endif

    goto done;

err:
    if (error == NULL)
        error = drv.driver.errorMessage;

    if (error != NULL)
        fprintf(stderr, "%s\r\n", error);

    retval = -1;

done:
    if (started)
        ASIOStop();
    if (buffersCreated)
        ASIODisposeBuffers();
    if (inited)
        ASIOExit();
    return retval;
}
Example #5
// Process audio effects for 8 channels simultaneously:
void processEffects(const vec8_i32 &inpSamples, vec8_i32 &outSamples, const long n)
{
    // Extract int samples and convert to doubles:
    const vec4_d64 ds0 = _mm256_div_pd(
        _mm256_cvtepi32_pd(_mm256_extractf128_si256(inpSamples, 0)),
        _mm256_set1_pd((double)INT_MAX)
        );
    const vec4_d64 ds1 = _mm256_div_pd(
        _mm256_cvtepi32_pd(_mm256_extractf128_si256(inpSamples, 1)),
        _mm256_set1_pd((double)INT_MAX)
        );

    // Monitor input levels:
    fx.fi_monitor.levels[n + 0] = scalar_to_dBFS(ds0);
    fx.fi_monitor.levels[n + 1] = scalar_to_dBFS(ds1);

    vec4_d64 s0, s1;

    // f0_gain:
    {
        s0 = _mm256_mul_pd(ds0, fx.f0_gain.calc.gain[n + 0]);
        s1 = _mm256_mul_pd(ds1, fx.f0_gain.calc.gain[n + 1]);
    }

    // Monitor levels:
    fx.f0_output.levels[n + 0] = scalar_to_dBFS(s0);
    fx.f0_output.levels[n + 1] = scalar_to_dBFS(s1);

    // f1_compressor:
    {
        const vec4_dBFS l0 = scalar_to_dBFS_offs(s0);
        const vec4_dBFS l1 = scalar_to_dBFS_offs(s1);

        // over = s - thresh
        vec4_dB over0 = _mm256_sub_pd(l0, fx.f1_compressor.input.threshold[n + 0]);
        vec4_dB over1 = _mm256_sub_pd(l1, fx.f1_compressor.input.threshold[n + 1]);

        // over = if over < 0.0 then 0.0 else over;
        over0 = mm256_if_then_else(_mm256_cmp_pd(over0, _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), over0);
        over1 = mm256_if_then_else(_mm256_cmp_pd(over1, _mm256_set1_pd(0.0), _CMP_LT_OQ), _mm256_set1_pd(0.0), over1);

        // over += DC_OFFSET
        over0 = _mm256_add_pd(over0, DC_OFFSET);
        over1 = _mm256_add_pd(over1, DC_OFFSET);

        // env = over + coef * ( env - over )
        const vec4_dB attack_env0  = _mm256_add_pd(over0, _mm256_mul_pd(fx.f1_compressor.calc.attack_coef[n + 0], _mm256_sub_pd(fx.f1_compressor.state.env[n + 0], over0)));
        const vec4_dB attack_env1  = _mm256_add_pd(over1, _mm256_mul_pd(fx.f1_compressor.calc.attack_coef[n + 1], _mm256_sub_pd(fx.f1_compressor.state.env[n + 1], over1)));
        const vec4_dB release_env0  = _mm256_add_pd(over0, _mm256_mul_pd(fx.f1_compressor.calc.release_coef[n + 0], _mm256_sub_pd(fx.f1_compressor.state.env[n + 0], over0)));
        const vec4_dB release_env1  = _mm256_add_pd(over1, _mm256_mul_pd(fx.f1_compressor.calc.release_coef[n + 1], _mm256_sub_pd(fx.f1_compressor.state.env[n + 1], over1)));

        // env = if over > env then attack_env else release_env
        fx.f1_compressor.state.env[n + 0] = mm256_if_then_else(_mm256_cmp_pd(over0, fx.f1_compressor.state.env[n + 0], _CMP_GT_OQ), attack_env0, release_env0);
        fx.f1_compressor.state.env[n + 1] = mm256_if_then_else(_mm256_cmp_pd(over1, fx.f1_compressor.state.env[n + 1], _CMP_GT_OQ), attack_env1, release_env1);

        // over = env - DC_OFFSET
        over0 = _mm256_sub_pd(fx.f1_compressor.state.env[n + 0], DC_OFFSET);
        over1 = _mm256_sub_pd(fx.f1_compressor.state.env[n + 1], DC_OFFSET);

        // grdB = ( over * ( ratio - 1.0 ) )
        vec4_dB gr0dB = _mm256_mul_pd(over0, fx.f1_compressor.calc.ratio_min_1[n + 0]);
        vec4_dB gr1dB = _mm256_mul_pd(over1, fx.f1_compressor.calc.ratio_min_1[n + 1]);

        // gr = dB_to_scalar(grdB)
        fx.f1_compressor.monitor.gain_reduction[n + 0] = dB_to_scalar(gr0dB);
        fx.f1_compressor.monitor.gain_reduction[n + 1] = dB_to_scalar(gr1dB);

        // Apply gain reduction to inputs:
        s0 = _mm256_mul_pd(s0, fx.f1_compressor.monitor.gain_reduction[n + 0]);
        s1 = _mm256_mul_pd(s1, fx.f1_compressor.monitor.gain_reduction[n + 1]);

        // Apply make-up gain:
        s0 = _mm256_mul_pd(s0, fx.f1_compressor.calc.gain[n + 0]);
        s1 = _mm256_mul_pd(s1, fx.f1_compressor.calc.gain[n + 1]);
    }

    // Monitor output levels:
    fx.fo_monitor.levels[n + 0] = scalar_to_dBFS(s0);
    fx.fo_monitor.levels[n + 1] = scalar_to_dBFS(s1);

    // TODO(jsd): Better limiter implementation!
    // Limit final samples:
    s0 = _mm256_max_pd(_mm256_min_pd(s0, _mm256_set1_pd((double)1.0)), _mm256_set1_pd((double)-1.0));
    s1 = _mm256_max_pd(_mm256_min_pd(s1, _mm256_set1_pd((double)1.0)), _mm256_set1_pd((double)-1.0));

    // Convert doubles back to 32-bit ints:
    s0 = _mm256_mul_pd(s0, _mm256_set1_pd((double)INT_MAX));
    s1 = _mm256_mul_pd(s1, _mm256_set1_pd((double)INT_MAX));
    const vec8_i32 os = _mm256_setr_m128i(_mm256_cvtpd_epi32(s0), _mm256_cvtpd_epi32(s1));

    // Write outputs:
    _mm256_stream_si256(&outSamples, os);
}
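This function leans on mm256_if_then_else for branch-free selects driven by _mm256_cmp_pd masks. Its definition is not shown here; a plausible implementation via _mm256_blendv_pd would be (an assumption for illustration, not the original's code):

// blendv takes lanes from its second operand where the mask sign bit is set,
// so an all-ones compare mask selects then_v.
static inline __m256d mm256_if_then_else(__m256d cond, __m256d then_v, __m256d else_v)
{
    return _mm256_blendv_pd(else_v, then_v, cond);
}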
Example #6
 inline vector4db operator<=(const vector4d& lhs, const vector4d& rhs)
 {
     return _mm256_cmp_pd(lhs, rhs, _CMP_LE_OQ);
 }
Example #7
 inline vector4db operator==(const vector4d& lhs, const vector4d& rhs)
 {
     return _mm256_cmp_pd(lhs, rhs, _CMP_EQ_OQ);
 }
Example #8
 inline vector4db operator!=(const vector4db& lhs, const vector4db& rhs)
 {
     // Caveat: the all-ones "true" mask is a NaN bit pattern, so this ordered
     // compare yields false whenever either lane is set; a bitwise
     // _mm256_xor_pd(lhs, rhs) is the robust inequality for boolean masks.
     return _mm256_cmp_pd(lhs, rhs, _CMP_NEQ_OQ);
 }
Example #9
inline F64vec4 mask_lt(const F64vec4 &l, const F64vec4 &r)
{
    return _mm256_cmp_pd(l, r, _CMP_LT_OS);
}
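A note on the predicate suffixes used across these examples: each _CMP_* constant encodes the relation plus ordered/unordered (O/U) and quiet/signaling (Q/S) behaviour. Ordered predicates such as _CMP_LT_OQ or _CMP_LT_OS compare false whenever a lane holds NaN; the S variants additionally raise the invalid-operation flag on quiet NaNs. A small demonstration (cmp_nan_demo is a made-up name):

#include <immintrin.h>
#include <cmath>
#include <cstdio>

// Ordered predicates compare false for NaN lanes, so only lanes 0 and 1
// (1.0 < 3.0 and 2.0 < 3.0) set their mask bits here.
static void cmp_nan_demo(void)
{
    __m256d a = _mm256_setr_pd(1.0, 2.0, NAN, 4.0);
    __m256d b = _mm256_set1_pd(3.0);
    int m = _mm256_movemask_pd(_mm256_cmp_pd(a, b, _CMP_LT_OQ));
    printf("mask = %d\n", m);   // prints 3 (binary 0011)
}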
Example #10
BI_FORCE_INLINE inline avx_double operator>=(const avx_double& o1,
    const avx_double& o2) {
  avx_double res;
  res.packed = _mm256_cmp_pd(o1.packed, o2.packed, _CMP_GE_OQ);
  return res;
}
Example #11
static inline __m256d gmx_mm256_exp2_pd(__m256d x)
{
    /* Lower bound: we do not allow arguments that would lead to an IEEE double representation exponent smaller than -1022. */
    const __m256d arglimit = _mm256_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m256d P2       = _mm256_set1_pd(2.30933477057345225087e-2);
    const __m256d P1       = _mm256_set1_pd(2.02020656693165307700e1);
    const __m256d P0       = _mm256_set1_pd(1.51390680115615096133e3);
    /* Q2 == 1.0 */
    const __m256d Q1       = _mm256_set1_pd(2.33184211722314911771e2);
    const __m256d Q0       = _mm256_set1_pd(4.36821166879210612817e3);
    const __m256d one      = _mm256_set1_pd(1.0);
    const __m256d two      = _mm256_set1_pd(2.0);

    __m256d       valuemask;
    __m256i       iexppart;
    __m128i       iexppart128a, iexppart128b;
    __m256d       fexppart;
    __m256d       intpart;
    __m256d       z, z2;
    __m256d       PolyP, PolyQ;

    iexppart128a  = _mm256_cvtpd_epi32(x);
    intpart       = _mm256_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* Add exponent bias */
    iexppart128a   = _mm_add_epi32(iexppart128a, expbase);

    /* We now want to shift the exponent 52 positions left, but to achieve this we need
     * to spread the four 32-bit exponents across two 128-bit registers (two 64-bit
     * slots each), shift them, and then merge into a single __m256d.
     * Elements 0/1 should end up in iexppart128a, and 2/3 in iexppart128b.
     * It doesn't matter what we put in the 2nd/4th position, since that data will be
     * shifted out and replaced with zeros.
     */
    iexppart128b   = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(3, 3, 2, 2));
    iexppart128a   = _mm_shuffle_epi32(iexppart128a, _MM_SHUFFLE(1, 1, 0, 0));

    iexppart128b   = _mm_slli_epi64(iexppart128b, 52);
    iexppart128a   = _mm_slli_epi64(iexppart128a, 52);

    iexppart  = _mm256_castsi128_si256(iexppart128a);
    iexppart  = _mm256_insertf128_si256(iexppart, iexppart128b, 0x1);

    valuemask = _mm256_cmp_pd(arglimit, gmx_mm256_abs_pd(x), _CMP_GE_OQ);
    fexppart  = _mm256_and_pd(valuemask, _mm256_castsi256_pd(iexppart));

    z         = _mm256_sub_pd(x, intpart);

    z2        = _mm256_mul_pd(z, z);

    PolyP     = _mm256_mul_pd(P2, z2);
    PolyP     = _mm256_add_pd(PolyP, P1);
    PolyQ     = _mm256_add_pd(z2, Q1);
    PolyP     = _mm256_mul_pd(PolyP, z2);
    PolyQ     = _mm256_mul_pd(PolyQ, z2);
    PolyP     = _mm256_add_pd(PolyP, P0);
    PolyQ     = _mm256_add_pd(PolyQ, Q0);
    PolyP     = _mm256_mul_pd(PolyP, z);

    z         = _mm256_mul_pd(PolyP, gmx_mm256_inv_pd(_mm256_sub_pd(PolyQ, PolyP)));
    z         = _mm256_add_pd(one, _mm256_mul_pd(two, z));

    z         = _mm256_mul_pd(z, fexppart);

    return z;
}
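A hypothetical smoke test for the routine above, comparing each lane against the libm reference (it assumes gmx_mm256_exp2_pd and its gmx_mm256_abs_pd/gmx_mm256_inv_pd helpers are in scope):

#include <immintrin.h>
#include <cmath>
#include <cstdio>

// Print each lane of the vectorized exp2 next to std::exp2 for eyeballing.
static void exp2_check(void)
{
    double in[4] = { -3.5, 0.0, 1.0, 10.25 };
    double out[4];
    _mm256_storeu_pd(out, gmx_mm256_exp2_pd(_mm256_loadu_pd(in)));
    for (int i = 0; i < 4; ++i)
        printf("exp2(%g) = %g (ref %g)\n", in[i], out[i], std::exp2(in[i]));
}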
Example #12
void rnn_r_int_d8x4_var3(
    int    k,
    int    r,
    double *aa,
    double *a,
    double *bb,
    double *b,
    double *c,
    char   *flag,
    aux_t  *aux
    )
{
  int    i;
  double neg2 = -2.0;
  double dzero = 0.0;
  v4df_t c03_0, c03_1, c03_2, c03_3;
  v4df_t c47_0, c47_1, c47_2, c47_3;
  v4df_t tmpc03_0, tmpc03_1, tmpc03_2, tmpc03_3;
  v4df_t tmpc47_0, tmpc47_1, tmpc47_2, tmpc47_3;
  v4df_t c_tmp;
  v4df_t a03, a47;
  v4df_t A03, A47; // prefetched A 

  v4df_t b0, b1, b2, b3;
  v4df_t B0; // prefetched B
  v4df_t aa_tmp, bb_tmp;


  int    k_iter = k / 2;
  int    k_left = k % 2;

  double *D = aux->D;

  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( a ) );
  __asm__ volatile( "prefetcht2 0(%0)    \n\t" : :"r"( aux->b_next ) );
  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( c ) );


  c03_0.v = _mm256_setzero_pd();
  c03_1.v = _mm256_setzero_pd();
  c03_2.v = _mm256_setzero_pd();
  c03_3.v = _mm256_setzero_pd();
  c47_0.v = _mm256_setzero_pd();
  c47_1.v = _mm256_setzero_pd();
  c47_2.v = _mm256_setzero_pd();
  c47_3.v = _mm256_setzero_pd();


  // Load a03
  a03.v = _mm256_load_pd(      (double*)a         );
  // Load a47
  a47.v = _mm256_load_pd(      (double*)( a + 4 ) );
  // Load (b0,b1,b2,b3)
  b0.v  = _mm256_load_pd(      (double*)b         );

  for ( i = 0; i < k_iter; ++i ) {
    __asm__ volatile( "prefetcht0 192(%0)    \n\t" : :"r"(a) );

    // Preload A03
    A03.v = _mm256_load_pd(      (double*)( a + 8 ) );

    c_tmp.v = _mm256_mul_pd( a03.v  , b0.v    );
    c03_0.v = _mm256_add_pd( c_tmp.v, c03_0.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b0.v    );
    c47_0.v = _mm256_add_pd( c_tmp.v, c47_0.v );

    // Preload A47
    A47.v = _mm256_load_pd(      (double*)( a + 12 ) );

    // Shuffle b ( 1, 0, 3, 2 )
    b1.v  = _mm256_shuffle_pd( b0.v, b0.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( a03.v  , b1.v    );
    c03_1.v = _mm256_add_pd( c_tmp.v, c03_1.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b1.v    );
    c47_1.v = _mm256_add_pd( c_tmp.v, c47_1.v );

    // Permute b ( 3, 2, 1, 0 )
    b2.v  = _mm256_permute2f128_pd( b1.v, b1.v, 0x1 );

    // Preload B0
    B0.v  = _mm256_load_pd(      (double*)( b + 4 ) );

    c_tmp.v = _mm256_mul_pd( a03.v  , b2.v    );
    c03_2.v = _mm256_add_pd( c_tmp.v, c03_2.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b2.v    );
    c47_2.v = _mm256_add_pd( c_tmp.v, c47_2.v );

    // Shuffle b ( 2, 3, 0, 1 )
    b3.v  = _mm256_shuffle_pd( b2.v, b2.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( a03.v  , b3.v    );
    c03_3.v = _mm256_add_pd( c_tmp.v, c03_3.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b3.v    );
    c47_3.v = _mm256_add_pd( c_tmp.v, c47_3.v );


    // Iteration #1
    __asm__ volatile( "prefetcht0 512(%0)    \n\t" : :"r"(a) );

    // Preload a03 ( next iteration )
    a03.v = _mm256_load_pd(      (double*)( a + 16 ) );

    c_tmp.v = _mm256_mul_pd( A03.v  , B0.v    );
    c03_0.v = _mm256_add_pd( c_tmp.v, c03_0.v );

    b1.v  = _mm256_shuffle_pd( B0.v, B0.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( A47.v  , B0.v    );
    c47_0.v = _mm256_add_pd( c_tmp.v, c47_0.v );
    c_tmp.v = _mm256_mul_pd( A03.v  , b1.v    );
    c03_1.v = _mm256_add_pd( c_tmp.v, c03_1.v );

    // Preload a47 ( next iteration )
    a47.v = _mm256_load_pd(      (double*)( a + 20 ) );

    // Permute b ( 3, 2, 1, 0 )
    b2.v  = _mm256_permute2f128_pd( b1.v, b1.v, 0x1 );

    c_tmp.v = _mm256_mul_pd( A47.v  , b1.v    );
    c47_1.v = _mm256_add_pd( c_tmp.v, c47_1.v );
    c_tmp.v = _mm256_mul_pd( A03.v  , b2.v    );
    c03_2.v = _mm256_add_pd( c_tmp.v, c03_2.v );

    // Shuffle b ( 2, 3, 0, 1 )
    b3.v  = _mm256_shuffle_pd( b2.v, b2.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( A47.v  , b2.v    );
    c47_2.v = _mm256_add_pd( c_tmp.v, c47_2.v );

    // Load b0 ( next iteration )
    b0.v  = _mm256_load_pd(      (double*)( b + 8 ) );

    c_tmp.v = _mm256_mul_pd( A03.v  , b3.v    );
    c03_3.v = _mm256_add_pd( c_tmp.v, c03_3.v );
    c_tmp.v = _mm256_mul_pd( A47.v  , b3.v    );
    c47_3.v = _mm256_add_pd( c_tmp.v, c47_3.v );

    a += 16;
    b += 8;
  }

  for ( i = 0; i < k_left; ++i ) {
    a03.v = _mm256_load_pd(      (double*)a         );
    //printf( "a03 = %lf, %lf, %lf, %lf\n", a03.d[0], a03.d[1], a03.d[2], a03.d[3] );

    a47.v = _mm256_load_pd(      (double*)( a + 4 ) );
    //printf( "a47 = %lf, %lf, %lf, %lf\n", a47.d[0], a47.d[1], a47.d[2], a47.d[3] );

    b0.v  = _mm256_load_pd(      (double*)b         );
    //printf( "b0  = %lf, %lf, %lf, %lf\n", b0.d[0], b0.d[1], b0.d[2], b0.d[3] );

    c_tmp.v = _mm256_mul_pd( a03.v  , b0.v    );
    c03_0.v = _mm256_add_pd( c_tmp.v, c03_0.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b0.v    );
    c47_0.v = _mm256_add_pd( c_tmp.v, c47_0.v );

    // Shuffle b ( 1, 0, 3, 2 )
    b1.v  = _mm256_shuffle_pd( b0.v, b0.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( a03.v  , b1.v    );
    c03_1.v = _mm256_add_pd( c_tmp.v, c03_1.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b1.v    );
    c47_1.v = _mm256_add_pd( c_tmp.v, c47_1.v );

    // Permute b ( 3, 2, 1, 0 )
    b2.v  = _mm256_permute2f128_pd( b1.v, b1.v, 0x1 );

    c_tmp.v = _mm256_mul_pd( a03.v  , b2.v    );
    c03_2.v = _mm256_add_pd( c_tmp.v, c03_2.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b2.v    );
    c47_2.v = _mm256_add_pd( c_tmp.v, c47_2.v );

    // Shuffle b ( 2, 3, 0, 1 )
    b3.v  = _mm256_shuffle_pd( b2.v, b2.v, 0x5 );

    c_tmp.v = _mm256_mul_pd( a03.v  , b3.v    );
    c03_3.v = _mm256_add_pd( c_tmp.v, c03_3.v );
    c_tmp.v = _mm256_mul_pd( a47.v  , b3.v    );
    c47_3.v = _mm256_add_pd( c_tmp.v, c47_3.v );

    a += 8;
    b += 4;
  }
 

  // Prefetch aa and bb
  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( aa ) );
  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( bb ) );


  tmpc03_0.v = _mm256_blend_pd( c03_0.v, c03_1.v, 0x6 );
  tmpc03_1.v = _mm256_blend_pd( c03_1.v, c03_0.v, 0x6 );
  
  tmpc03_2.v = _mm256_blend_pd( c03_2.v, c03_3.v, 0x6 );
  tmpc03_3.v = _mm256_blend_pd( c03_3.v, c03_2.v, 0x6 );

  tmpc47_0.v = _mm256_blend_pd( c47_0.v, c47_1.v, 0x6 );
  tmpc47_1.v = _mm256_blend_pd( c47_1.v, c47_0.v, 0x6 );

  tmpc47_2.v = _mm256_blend_pd( c47_2.v, c47_3.v, 0x6 );
  tmpc47_3.v = _mm256_blend_pd( c47_3.v, c47_2.v, 0x6 );

  c03_0.v    = _mm256_permute2f128_pd( tmpc03_0.v, tmpc03_2.v, 0x30 );
  c03_3.v    = _mm256_permute2f128_pd( tmpc03_2.v, tmpc03_0.v, 0x30 );

  c03_1.v    = _mm256_permute2f128_pd( tmpc03_1.v, tmpc03_3.v, 0x30 );
  c03_2.v    = _mm256_permute2f128_pd( tmpc03_3.v, tmpc03_1.v, 0x30 );

  c47_0.v    = _mm256_permute2f128_pd( tmpc47_0.v, tmpc47_2.v, 0x30 );
  c47_3.v    = _mm256_permute2f128_pd( tmpc47_2.v, tmpc47_0.v, 0x30 );

  c47_1.v    = _mm256_permute2f128_pd( tmpc47_1.v, tmpc47_3.v, 0x30 );
  c47_2.v    = _mm256_permute2f128_pd( tmpc47_3.v, tmpc47_1.v, 0x30 );

  //printf( "rank-k\n" );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[0], c03_1.d[0], c03_2.d[0], c03_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[1], c03_1.d[1], c03_2.d[1], c03_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[2], c03_1.d[2], c03_2.d[2], c03_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[3], c03_1.d[3], c03_2.d[3], c03_3.d[3] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[0], c47_1.d[0], c47_2.d[0], c47_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[1], c47_1.d[1], c47_2.d[1], c47_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[2], c47_1.d[2], c47_2.d[2], c47_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[3], c47_1.d[3], c47_2.d[3], c47_3.d[3] );

  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( aux->I ) );
  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( aux->D ) );


  //for ( i = 0; i < k; i++ ) {
  //  a03.v = _mm256_load_pd(      (double*)a         );
  //  a47.v = _mm256_load_pd(      (double*)( a + 4 ) );
  //  b0.v  = _mm256_broadcast_sd( (double*)b         );
  //  b1.v  = _mm256_broadcast_sd( (double*)( b + 1 ) );
  //  b2.v  = _mm256_broadcast_sd( (double*)( b + 2 ) );
  //  b3.v  = _mm256_broadcast_sd( (double*)( b + 3 ) );

  //  a += DKS_MR;
  //  b += DKS_NR;

  //  c_tmp.v = _mm256_mul_pd( a03.v  , b0.v    );
  //  c03_0.v = _mm256_add_pd( c_tmp.v, c03_0.v );
  //  c_tmp.v = _mm256_mul_pd( a03.v  , b1.v    );
  //  c03_1.v = _mm256_add_pd( c_tmp.v, c03_1.v );
  //  c_tmp.v = _mm256_mul_pd( a03.v  , b2.v    );
  //  c03_2.v = _mm256_add_pd( c_tmp.v, c03_2.v );
  //  c_tmp.v = _mm256_mul_pd( a03.v  , b3.v    );
  //  c03_3.v = _mm256_add_pd( c_tmp.v, c03_3.v );

  //  c_tmp.v = _mm256_mul_pd( a47.v  , b0.v    );
  //  c47_0.v = _mm256_add_pd( c_tmp.v, c47_0.v );
  //  c_tmp.v = _mm256_mul_pd( a47.v  , b1.v    );
  //  c47_1.v = _mm256_add_pd( c_tmp.v, c47_1.v );
  //  c_tmp.v = _mm256_mul_pd( a47.v  , b2.v    );
  //  c47_2.v = _mm256_add_pd( c_tmp.v, c47_2.v );
  //  c_tmp.v = _mm256_mul_pd( a47.v  , b3.v    );
  //  c47_3.v = _mm256_add_pd( c_tmp.v, c47_3.v );
  //}
  
  aa_tmp.v = _mm256_broadcast_sd( &neg2 );
  //c03_0.v  = _mm256_mul_pd( aa_tmp.v, c03_0.v );
  //c03_1.v  = _mm256_mul_pd( aa_tmp.v, c03_1.v );
  //c03_2.v  = _mm256_mul_pd( aa_tmp.v, c03_2.v );
  //c03_3.v  = _mm256_mul_pd( aa_tmp.v, c03_3.v );
  //c47_0.v  = _mm256_mul_pd( aa_tmp.v, c47_0.v );
  //c47_1.v  = _mm256_mul_pd( aa_tmp.v, c47_1.v );
  //c47_2.v  = _mm256_mul_pd( aa_tmp.v, c47_2.v );
  //c47_3.v  = _mm256_mul_pd( aa_tmp.v, c47_3.v );
  //
  c03_0.v  = _mm256_mul_pd( aa_tmp.v, c03_0.v );
  c03_1.v  = _mm256_mul_pd( aa_tmp.v, c03_1.v );
  c03_2.v  = _mm256_mul_pd( aa_tmp.v, c03_2.v );
  c03_3.v  = _mm256_mul_pd( aa_tmp.v, c03_3.v );
  c47_0.v  = _mm256_mul_pd( aa_tmp.v, c47_0.v );
  c47_1.v  = _mm256_mul_pd( aa_tmp.v, c47_1.v );
  c47_2.v  = _mm256_mul_pd( aa_tmp.v, c47_2.v );
  c47_3.v  = _mm256_mul_pd( aa_tmp.v, c47_3.v );


  //printf( "scale -2 \n" );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[0], c03_1.d[0], c03_2.d[0], c03_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[1], c03_1.d[1], c03_2.d[1], c03_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[2], c03_1.d[2], c03_2.d[2], c03_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[3], c03_1.d[3], c03_2.d[3], c03_3.d[3] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[0], c47_1.d[0], c47_2.d[0], c47_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[1], c47_1.d[1], c47_2.d[1], c47_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[2], c47_1.d[2], c47_2.d[2], c47_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[3], c47_1.d[3], c47_2.d[3], c47_3.d[3] );


  aa_tmp.v = _mm256_load_pd( (double*)aa );
  c03_0.v  = _mm256_add_pd( aa_tmp.v, c03_0.v );
  c03_1.v  = _mm256_add_pd( aa_tmp.v, c03_1.v );
  c03_2.v  = _mm256_add_pd( aa_tmp.v, c03_2.v );
  c03_3.v  = _mm256_add_pd( aa_tmp.v, c03_3.v );

  //printf( "aa03 = %lf, %lf, %lf, %lf\n", aa_tmp.d[0], aa_tmp.d[1], aa_tmp.d[2], aa_tmp.d[3] );
  //printf( "bb03 = %lf, %lf, %lf, %lf\n", bb[ 0 ], bb[ 1 ], bb[ 2 ], bb[ 3 ] );

  aa_tmp.v = _mm256_load_pd( (double*)( aa + 4 ) );
  c47_0.v  = _mm256_add_pd( aa_tmp.v, c47_0.v );
  c47_1.v  = _mm256_add_pd( aa_tmp.v, c47_1.v );
  c47_2.v  = _mm256_add_pd( aa_tmp.v, c47_2.v );
  c47_3.v  = _mm256_add_pd( aa_tmp.v, c47_3.v );
  

  //printf( "add a^2\n" );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[0], c03_1.d[0], c03_2.d[0], c03_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[1], c03_1.d[1], c03_2.d[1], c03_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[2], c03_1.d[2], c03_2.d[2], c03_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c03_0.d[3], c03_1.d[3], c03_2.d[3], c03_3.d[3] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[0], c47_1.d[0], c47_2.d[0], c47_3.d[0] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[1], c47_1.d[1], c47_2.d[1], c47_3.d[1] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[2], c47_1.d[2], c47_2.d[2], c47_3.d[2] );
  //printf( "%lf, %lf, %lf, %lf\n", c47_0.d[3], c47_1.d[3], c47_2.d[3], c47_3.d[3] );


  bb_tmp.v = _mm256_broadcast_sd( (double*)bb );
  c03_0.v  = _mm256_add_pd( bb_tmp.v, c03_0.v );
  c47_0.v  = _mm256_add_pd( bb_tmp.v, c47_0.v );

  bb_tmp.v = _mm256_broadcast_sd( (double*)( bb + 1 ) );
  c03_1.v  = _mm256_add_pd( bb_tmp.v, c03_1.v );
  c47_1.v  = _mm256_add_pd( bb_tmp.v, c47_1.v );

  bb_tmp.v = _mm256_broadcast_sd( (double*)( bb + 2 ) );
  c03_2.v  = _mm256_add_pd( bb_tmp.v, c03_2.v );
  c47_2.v  = _mm256_add_pd( bb_tmp.v, c47_2.v );

  bb_tmp.v = _mm256_broadcast_sd( (double*)( bb + 3 ) );
  c03_3.v  = _mm256_add_pd( bb_tmp.v, c03_3.v );
  c47_3.v  = _mm256_add_pd( bb_tmp.v, c47_3.v );



  // Clamp any illegal (negative) value to zero
  c_tmp.v  = _mm256_broadcast_sd( &dzero );
  c03_0.v  = _mm256_max_pd( c_tmp.v, c03_0.v );
  c03_1.v  = _mm256_max_pd( c_tmp.v, c03_1.v );
  c03_2.v  = _mm256_max_pd( c_tmp.v, c03_2.v );
  c03_3.v  = _mm256_max_pd( c_tmp.v, c03_3.v );
  c47_0.v  = _mm256_max_pd( c_tmp.v, c47_0.v );
  c47_1.v  = _mm256_max_pd( c_tmp.v, c47_1.v );
  c47_2.v  = _mm256_max_pd( c_tmp.v, c47_2.v );
  c47_3.v  = _mm256_max_pd( c_tmp.v, c47_3.v );

  __asm__ volatile( "prefetcht0 0(%0)    \n\t" : :"r"( flag ) );

  // Transpose c03/c47 _0, _1, _2, _3 to be the row vector
  tmpc03_0.v = _mm256_shuffle_pd( c03_0.v, c03_1.v, 0x0 );
  tmpc03_1.v = _mm256_shuffle_pd( c03_0.v, c03_1.v, 0xF );

  tmpc03_2.v = _mm256_shuffle_pd( c03_2.v, c03_3.v, 0x0 );
  tmpc03_3.v = _mm256_shuffle_pd( c03_2.v, c03_3.v, 0xF );

  tmpc47_0.v = _mm256_shuffle_pd( c47_0.v, c47_1.v, 0x0 );
  tmpc47_1.v = _mm256_shuffle_pd( c47_0.v, c47_1.v, 0xF );

  tmpc47_2.v = _mm256_shuffle_pd( c47_2.v, c47_3.v, 0x0 );
  tmpc47_3.v = _mm256_shuffle_pd( c47_2.v, c47_3.v, 0xF );

  c03_0.v    = _mm256_permute2f128_pd( tmpc03_0.v, tmpc03_2.v, 0x20 );
  c03_2.v    = _mm256_permute2f128_pd( tmpc03_0.v, tmpc03_2.v, 0x31 );

  c03_1.v    = _mm256_permute2f128_pd( tmpc03_1.v, tmpc03_3.v, 0x20 );
  c03_3.v    = _mm256_permute2f128_pd( tmpc03_1.v, tmpc03_3.v, 0x31 );

  c47_0.v    = _mm256_permute2f128_pd( tmpc47_0.v, tmpc47_2.v, 0x20 );
  c47_2.v    = _mm256_permute2f128_pd( tmpc47_0.v, tmpc47_2.v, 0x31 );

  c47_1.v    = _mm256_permute2f128_pd( tmpc47_1.v, tmpc47_3.v, 0x20 );
  c47_3.v    = _mm256_permute2f128_pd( tmpc47_1.v, tmpc47_3.v, 0x31 );


  // c03_0;
  // c03_1;
  // c03_2;
  // c03_3;
  // c47_0;
  // c47_1;
  // c47_2;
  // c47_3;


  // Reuse b0, b1, b2, b3
  aa_tmp.v = _mm256_broadcast_sd( D );
  b0.v     = _mm256_cmp_pd( c03_0.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c     , c03_0.v );
    flag[ 0 ] = 1;
    //printf( "store c03_0\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + r );
  b0.v     = _mm256_cmp_pd( c03_1.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 1 * DRNN_NC, c03_1.v );
    flag[ DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c03_1\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 2 * r );
  b0.v     = _mm256_cmp_pd( c03_2.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 2 * DRNN_NC, c03_2.v );
    flag[ 2 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c03_2\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 3 * r );
  b0.v     = _mm256_cmp_pd( c03_3.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 3 * DRNN_NC, c03_3.v );
    flag[ 3 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c03_3\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 4 * r );
  b0.v     = _mm256_cmp_pd( c47_0.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 4 * DRNN_NC, c47_0.v );
    flag[ 4 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c47_0\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 5 * r );
  b0.v     = _mm256_cmp_pd( c47_1.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 5 * DRNN_NC, c47_1.v );
    flag[ 5 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c47_1\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 6 * r );
  b0.v     = _mm256_cmp_pd( c47_2.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 6 * DRNN_NC, c47_2.v );
    flag[ 6 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c47_2\n" );
  }

  aa_tmp.v = _mm256_broadcast_sd( D + 7 * r );
  b0.v     = _mm256_cmp_pd( c47_3.v, aa_tmp.v, 0x1 );

  if ( !_mm256_testz_pd( b0.v, b0.v ) ) {
    _mm256_store_pd( c + 7 * DRNN_NC, c47_3.v );
    flag[ 7 * DRNN_NC / DRNN_NR ] = 1;
    //printf( "store c47_3\n" );
  }


  //_mm256_store_pd( c     , c03_0.v );
  //_mm256_store_pd( c +  4, c03_1.v );
  //_mm256_store_pd( c +  8, c03_2.v );
  //_mm256_store_pd( c + 12, c03_3.v );
  //_mm256_store_pd( c + 16, c47_0.v );
  //_mm256_store_pd( c + 20, c47_1.v );
  //_mm256_store_pd( c + 24, c47_2.v );
  //_mm256_store_pd( c + 28, c47_3.v );
}
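The store pattern at the end of this kernel, _mm256_cmp_pd followed by _mm256_testz_pd(mask, mask), asks "did any lane pass?": testz sets ZF exactly when every sign bit of the AND of its arguments is clear. A minimal sketch of the same idiom (store_if_any_below is a made-up name; predicate 0x1 is _CMP_LT_OS, as in the kernel):

#include <immintrin.h>

// Write the 4-wide result to dst only when some lane is below the threshold.
static int store_if_any_below(double *dst, __m256d vals, double thresh)
{
    __m256d m = _mm256_cmp_pd(vals, _mm256_set1_pd(thresh), _CMP_LT_OS);
    if (!_mm256_testz_pd(m, m)) {   // testz is 1 only when every lane mask is clear
        _mm256_storeu_pd(dst, vals);
        return 1;
    }
    return 0;
}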
Example #13
void
test5bit (void)
{
  d1 = _mm_cmp_sd (d2, d3, k4);		  /* { dg-error "the last argument must be a 5-bit immediate" } */
  a1 = _mm_cmp_ss (a2, a3, k4);		  /* { dg-error "the last argument must be a 5-bit immediate" } */
  d1 = _mm_cmp_pd (d2, d3, k4);		  /* { dg-error "the last argument must be a 5-bit immediate" } */
  a1 = _mm_cmp_ps (a2, a3, k4);		  /* { dg-error "the last argument must be a 5-bit immediate" } */
  e1 = _mm256_cmp_pd (e2, e3, k4);	  /* { dg-error "the last argument must be a 5-bit immediate" } */
  b1 = _mm256_cmp_ps (b2, b3, k4);	  /* { dg-error "the last argument must be a 5-bit immediate" } */
}
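These calls are expected to be rejected because k4 is a runtime variable: the predicate operand of the cmp intrinsics must be a compile-time constant in [0, 31], i.e. a 5-bit immediate. The same intrinsic compiles once a constant such as _CMP_LT_OQ is supplied, e.g.:

#include <immintrin.h>

// Accepted: the predicate is a compile-time constant in [0, 31].
__m256d cmp_ok(__m256d e2, __m256d e3)
{
    return _mm256_cmp_pd(e2, e3, _CMP_LT_OQ);
}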