/*******************************************************************************
 VectorScalarMultiplyD */
Error_t
VectorScalarMultiplyD(double       *dest,
                      const double *in1,
                      const double  scalar,
                      unsigned      length)
{
#ifdef __APPLE__
    // Use the Accelerate framework if we have it
    vDSP_vsmulD(in1, 1, &scalar, dest, 1, length);
#else
    // Otherwise do it manually
    unsigned i;
    const unsigned end = 4 * (length / 4);
    for (i = 0; i < end; i += 4)
    {
        dest[i]     = in1[i]     * scalar;
        dest[i + 1] = in1[i + 1] * scalar;
        dest[i + 2] = in1[i + 2] * scalar;
        dest[i + 3] = in1[i + 3] * scalar;
    }
    for (i = end; i < length; ++i)
    {
        dest[i] = in1[i] * scalar;
    }
#endif
    return NOERR;
}
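A minimal usage sketch for the function above, assuming Error_t, NOERR, and the
VectorScalarMultiplyD prototype come from the surrounding library's header; the
buffer contents and the example_scale name are illustrative only.

#include <stdio.h>

static int
example_scale(void)
{
    double in[5]  = {1.0, 2.0, 3.0, 4.0, 5.0};
    double out[5] = {0.0};

    /* Multiply every sample by 0.5 into a separate output buffer */
    if (VectorScalarMultiplyD(out, in, 0.5, 5) != NOERR)
        return -1;

    for (unsigned i = 0; i < 5; ++i)
        printf("%f\n", out[i]);   /* prints 0.5, 1.0, 1.5, 2.0, 2.5 */
    return 0;
}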
/*******************************************************************************
 DoubleBufferToInt16 */
Error_t
DoubleBufferToInt16(signed short* dest, const double* src, unsigned length)
{
#ifdef __APPLE__
    // Use the Accelerate framework if we have it
    double scale = (double)INT16_MAX;
    double temp[length];
    vDSP_vsmulD(src, 1, &scale, temp, 1, length);
    vDSP_vfix16D(temp, 1, dest, 1, length);
#else
    // Otherwise do it manually
    unsigned i;
    const unsigned end = 4 * (length / 4);
    for (i = 0; i < end; i += 4)
    {
        dest[i]     = floatToInt16(*src++);
        dest[i + 1] = floatToInt16(*src++);
        dest[i + 2] = floatToInt16(*src++);
        dest[i + 3] = floatToInt16(*src++);
    }
    for (i = end; i < length; ++i)
    {
        dest[i] = floatToInt16(*src++);
    }
#endif
    return NOERR;
}
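The non-Apple branch above relies on a floatToInt16() helper that is not shown
here. A plausible clamping conversion is sketched below as an assumption; it is
not the library's actual implementation, only what such a helper typically does.

#include <stdint.h>

static inline signed short
floatToInt16(double sample)
{
    /* Scale a normalized [-1.0, 1.0] sample to the int16 range and clamp
       to avoid overflow on out-of-range input (assumed behavior). */
    double scaled = sample * (double)INT16_MAX;
    if (scaled > (double)INT16_MAX) return INT16_MAX;
    if (scaled < (double)INT16_MIN) return INT16_MIN;
    return (signed short)scaled;
}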
static void
ao_vmults(vec *c, vec v, double n)
{
    vDSP_vsmulD(v.co, 1, &n, c->co, 1, 3);
    /*
    c->x = v.x * n;
    c->y = v.y * n;
    c->z = v.z * n;
    */
}
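ao_vmults indexes both v.co (for the vDSP call) and v.x/v.y/v.z (in the
commented-out scalar fallback), which suggests the vec type overlays a
3-element double array with named components. The definition below is a guess
for illustration only, not the project's actual declaration.

/* Hypothetical layout assumed by ao_vmults (C11 anonymous struct). */
typedef union vec {
    double co[3];              /* contiguous storage passed to vDSP_vsmulD */
    struct { double x, y, z; }; /* named access used by the scalar fallback */
} vec;

/* e.g.  vec a = {{1.0, 2.0, 3.0}}, b;  ao_vmults(&b, a, 2.0);  => b.co = {2, 4, 6} */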
void JUCE_CALLTYPE FloatVectorOperations::copyWithMultiply (double* dest, const double* src, double multiplier, int num) noexcept
{
   #if JUCE_USE_VDSP_FRAMEWORK
    vDSP_vsmulD (src, 1, &multiplier, dest, 1, (vDSP_Length) num);
   #else
    JUCE_PERFORM_VEC_OP_SRC_DEST (dest[i] = src[i] * multiplier, Mode::mul (mult, s), JUCE_LOAD_SRC, JUCE_INCREMENT_SRC_DEST,
                                  const Mode::ParallelType mult = Mode::load1 (multiplier);)
   #endif
}