// JS binding for Float32Array.prototype.subarray(start [, end]).
// Validates the receiver, converts the integer arguments (propagating any
// conversion exception), and wraps the resulting view back into a JSValue.
EncodedJSValue JSC_HOST_CALL jsFloat32ArrayPrototypeFunctionSubarray(ExecState* exec)
{
    JSValue thisValue = exec->hostThisValue();
    // Reject receivers that are not Float32Array wrappers.
    if (!thisValue.inherits(&JSFloat32Array::s_info))
        return throwVMTypeError(exec);
    JSFloat32Array* castedThis = static_cast<JSFloat32Array*>(asObject(thisValue));
    ASSERT_GC_OBJECT_INHERITS(castedThis, &JSFloat32Array::s_info);
    Float32Array* impl = static_cast<Float32Array*>(castedThis->impl());

    int start(exec->argument(0).toInt32(exec));
    if (exec->hadException())
        return JSValue::encode(jsUndefined());

    // One-argument form: subarray(start) slices to the end of the view.
    if (exec->argumentCount() <= 1)
        return JSValue::encode(toJS(exec, castedThis->globalObject(), WTF::getPtr(impl->subarray(start))));

    // Two-argument form: subarray(start, end).
    int end(exec->argument(1).toInt32(exec));
    if (exec->hadException())
        return JSValue::encode(jsUndefined());
    return JSValue::encode(toJS(exec, castedThis->globalObject(), WTF::getPtr(impl->subarray(start, end))));
}
void WaveShaperDSPKernel::process(const float* source, float* destination, size_t framesToProcess) { ASSERT(source && destination && waveShaperProcessor()); Float32Array* curve = waveShaperProcessor()->curve(); if (!curve) { // Act as "straight wire" pass-through if no curve is set. memcpy(destination, source, sizeof(float) * framesToProcess); return; } float* curveData = curve->data(); int curveLength = curve->length(); ASSERT(curveData); if (!curveData || !curveLength) { memcpy(destination, source, sizeof(float) * framesToProcess); return; } // Apply waveshaping curve. for (unsigned i = 0; i < framesToProcess; ++i) { const float input = source[i]; // Calculate an index based on input -1 -> +1 with 0 being at the center of the curve data. int index = curveLength * 0.5 * (input + 1); // Clip index to the input range of the curve. // This takes care of input outside of nominal range -1 -> +1 index = max(index, 0); index = min(index, curveLength - 1); destination[i] = curveData[index]; } }
// Copies aSource into channel |aChannelNumber| starting at frame
// |aStartInChannel|. Throws IndexSizeError for a bad channel index or an
// out-of-range write, and OOM if the JS channel arrays cannot be restored.
void AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                                uint32_t aChannelNumber, uint32_t aStartInChannel,
                                ErrorResult& aRv)
{
    // Materialize the typed array's length/data before touching either.
    aSource.ComputeLengthAndData();

    const uint32_t sourceLength = aSource.Length();
    // Checked arithmetic guards against uint32 overflow of start + length.
    CheckedInt<uint32_t> writeEnd = aStartInChannel;
    writeEnd += sourceLength;
    if (aChannelNumber >= NumberOfChannels() || !writeEnd.isValid() || writeEnd.value() > mLength) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!mSharedChannels && JS_GetTypedArrayLength(mJSChannels[aChannelNumber]) != mLength) {
        // The array was probably neutered
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!RestoreJSChannelData(aJSContext)) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
    }

    PodMove(JS_GetFloat32ArrayData(mJSChannels[aChannelNumber]) + aStartInChannel,
            aSource.Data(), sourceLength);
}
// Copies aSource into channel |aChannelNumber| starting at frame
// |aStartInChannel|. Throws IndexSizeError for a bad channel index or an
// out-of-range write, and OOM if the JS channel arrays cannot be restored.
void AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource, uint32_t aChannelNumber, uint32_t aStartInChannel, ErrorResult& aRv)
{
    uint32_t length = aSource.Length();
    // Reject out-of-range writes. The previous test used
    // `aStartInChannel + length >= mLength`, which (a) wrongly rejected a copy
    // ending exactly at the end of the channel (allowed by
    // AudioBuffer.copyToChannel) and (b) could wrap on uint32 overflow and
    // accept an invalid range. The subtraction form below is overflow-safe and
    // permits writes up to, but not past, mLength.
    if (aChannelNumber >= NumberOfChannels() ||
        aStartInChannel > mLength || length > mLength - aStartInChannel) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!mSharedChannels && JS_GetTypedArrayLength(mJSChannels[aChannelNumber]) != mLength) {
        // The array was probably neutered
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!RestoreJSChannelData(aJSContext)) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
    }

    PodCopy(JS_GetFloat32ArrayData(mJSChannels[aChannelNumber]) + aStartInChannel, aSource.Data(), length);
}
// Read-only "length" property getter for the Float32Array JS wrapper.
JSValue jsFloat32ArrayLength(ExecState* exec, JSValue slotBase, const Identifier&)
{
    UNUSED_PARAM(exec);
    JSFloat32Array* wrapper = static_cast<JSFloat32Array*>(asObject(slotBase));
    Float32Array* impl = static_cast<Float32Array*>(wrapper->impl());
    return jsNumber(impl->length());
}
// WebIDL constructor overload taking a Float32Array of matrix components.
// Materializes the array's data, then delegates validation of the element
// count to SetDataInMatrix (which reports failures through aRv).
already_AddRefed<DOMMatrix> DOMMatrix::Constructor(const GlobalObject& aGlobal,
                                                   const Float32Array& aArray32,
                                                   ErrorResult& aRv)
{
    RefPtr<DOMMatrix> matrix = new DOMMatrix(aGlobal.GetAsSupports());
    aArray32.ComputeLengthAndData();
    SetDataInMatrix(matrix, aArray32.Data(), aArray32.Length(), aRv);
    return matrix.forget();
}
// Returns a new Float32Array view aliasing the storage of the requested
// channel (same buffer, offset, and length). Sets SYNTAX_ERR and returns
// null for an out-of-range channel index.
PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionCode& ec)
{
    if (channelIndex >= m_channels.size()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    Float32Array* channel = m_channels[channelIndex].get();
    return Float32Array::create(channel->buffer(), channel->byteOffset(), channel->length());
}
// Returns a new Float32Array view aliasing the storage of the requested
// channel (same buffer, offset, and length). Throws IndexSizeError with a
// descriptive message for an out-of-range channel index.
PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionState& exceptionState)
{
    if (channelIndex >= m_channels.size()) {
        exceptionState.throwDOMException(IndexSizeError,
            "channel index (" + String::number(channelIndex)
            + ") exceeds number of channels ("
            + String::number(m_channels.size()) + ")");
        return nullptr;
    }

    Float32Array* channel = m_channels[channelIndex].get();
    return Float32Array::create(channel->buffer(), channel->byteOffset(), channel->length());
}
// Fills aArray with the most recent time-domain samples, oldest first:
// the ring buffer is read starting at mWriteIndex and wraps around.
// Writes no more samples than either aArray or the ring buffer holds.
void AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray)
{
    float* buffer = aArray.Data();
    uint32_t length = std::min(aArray.Length(), mBuffer.Length());
    for (uint32_t i = 0; i < length; ++i) {
        // (Removed a stray empty statement ";;" that followed this line.)
        buffer[i] = mBuffer[(i + mWriteIndex) % mBuffer.Length()];
    }
}
// Computes the filter's magnitude and phase response at the requested
// frequencies, using the AudioParam values sampled at the current context
// time. Results are written into aMagResponse / aPhaseResponse.
void BiquadFilterNode::GetFrequencyResponse(const Float32Array& aFrequencyHz, const Float32Array& aMagResponse, const Float32Array& aPhaseResponse)
{
    // Materialize all three typed arrays before reading lengths or data.
    aFrequencyHz.ComputeLengthAndData();
    aMagResponse.ComputeLengthAndData();
    aPhaseResponse.ComputeLengthAndData();

    // Only process as many points as all three arrays can hold.
    uint32_t length = std::min(std::min(aFrequencyHz.Length(), aMagResponse.Length()), aPhaseResponse.Length());
    if (!length) {
        return;
    }

    nsAutoArrayPtr<float> frequencies(new float[length]);
    float* frequencyHz = aFrequencyHz.Data();
    const double nyquist = Context()->SampleRate() * 0.5;

    // Normalize the frequencies
    for (uint32_t i = 0; i < length; ++i) {
        frequencies[i] = static_cast<float>(frequencyHz[i] / nyquist);
    }

    // Snapshot the automatable parameters at the current time.
    const double currentTime = Context()->CurrentTime();

    double freq = mFrequency->GetValueAtTime(currentTime);
    double q = mQ->GetValueAtTime(currentTime);
    double gain = mGain->GetValueAtTime(currentTime);
    double detune = mDetune->GetValueAtTime(currentTime);

    // Configure a biquad with the snapshotted parameters and query it.
    WebCore::Biquad biquad;
    SetParamsOnBiquad(biquad, Context()->SampleRate(), mType, freq, q, gain, detune);
    biquad.getFrequencyResponse(int(length), frequencies, aMagResponse.Data(), aPhaseResponse.Data());
}
// V8 binding for Float32Array.prototype.subarray(start [, end]).
// EXCEPTION_BLOCK converts each argument and returns early on a conversion
// exception.
static v8::Handle<v8::Value> subarrayCallback(const v8::Arguments& args)
{
    INC_STATS("DOM.Float32Array.subarray");
    Float32Array* array = V8Float32Array::toNative(args.Holder());

    EXCEPTION_BLOCK(int, start, toInt32(args[0]));
    // One-argument form slices from start to the end of the view.
    if (args.Length() <= 1)
        return toV8(array->subarray(start));

    EXCEPTION_BLOCK(int, end, toInt32(args[1]));
    return toV8(array->subarray(start, end));
}
// V8 binding for Float32Array.prototype.subarray(start [, end]).
// Missing arguments are treated as undefined via MAYBE_MISSING_PARAMETER;
// EXCEPTION_BLOCK returns early on a conversion exception.
static v8::Handle<v8::Value> subarrayCallback(const v8::Arguments& args)
{
    INC_STATS("DOM.Float32Array.subarray");
    Float32Array* array = V8Float32Array::toNative(args.Holder());

    EXCEPTION_BLOCK(int, start, toInt32(MAYBE_MISSING_PARAMETER(args, 0, MissingIsUndefined)));
    // One-argument form slices from start to the end of the view.
    if (args.Length() <= 1)
        return toV8(array->subarray(start));

    EXCEPTION_BLOCK(int, end, toInt32(MAYBE_MISSING_PARAMETER(args, 1, MissingIsUndefined)));
    return toV8(array->subarray(start, end));
}
// Applies the shaping curve with linear interpolation between adjacent curve
// samples; input outside [-1, 1] saturates at the curve endpoints. With no
// usable curve the kernel acts as a "straight wire" pass-through.
void WaveShaperDSPKernel::processCurve(const float* source, float* destination, size_t framesToProcess)
{
    ASSERT(source && destination && waveShaperProcessor());

    Float32Array* curve = waveShaperProcessor()->curve();
    if (!curve) {
        // Act as "straight wire" pass-through if no curve is set.
        memcpy(destination, source, sizeof(float) * framesToProcess);
        return;
    }

    float* curveData = curve->data();
    int curveLength = curve->length();

    ASSERT(curveData);
    if (!curveData || !curveLength) {
        // Degenerate curve: also behave like a pass-through.
        memcpy(destination, source, sizeof(float) * framesToProcess);
        return;
    }

    // Apply waveshaping curve.
    for (unsigned i = 0; i < framesToProcess; ++i) {
        const float input = source[i];

        // Calculate a virtual index based on input -1 -> +1 with -1 being curve[0], +1 being
        // curve[curveLength - 1], and 0 being at the center of the curve data. Then linearly
        // interpolate between the two points in the curve.
        double virtualIndex = 0.5 * (input + 1) * (curveLength - 1);
        double output;
        if (virtualIndex < 0) {
            // input < -1, so use curve[0]
            output = curveData[0];
        } else if (virtualIndex >= curveLength - 1) {
            // input >= 1, so use last curve value
            output = curveData[curveLength - 1];
        } else {
            // The general case where -1 <= input < 1, where 0 <= virtualIndex < curveLength - 1,
            // so interpolate between the nearest samples on the curve.
            unsigned index1 = static_cast<unsigned>(virtualIndex);
            unsigned index2 = index1 + 1;
            double interpolationFactor = virtualIndex - index1;

            double value1 = curveData[index1];
            double value2 = curveData[index2];

            output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
        }
        destination[i] = output;
    }
}
void AnalyserNode::GetFloatFrequencyData(const Float32Array& aArray) { if (!FFTAnalysis()) { // Might fail to allocate memory return; } float* buffer = aArray.Data(); uint32_t length = std::min(aArray.Length(), mOutputBuffer.Length()); for (uint32_t i = 0; i < length; ++i) { buffer[i] = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels); } }
// Applies the shaping curve with linear interpolation between adjacent curve
// samples; input outside [-1, 1] saturates at the curve endpoints. With no
// usable curve the kernel acts as a "straight wire" pass-through.
void WaveShaperDSPKernel::processCurve(const float* source, float* destination, size_t framesToProcess)
{
    ASSERT(source && destination && waveShaperProcessor());

    Float32Array* curve = waveShaperProcessor()->curve();
    if (!curve) {
        // Act as "straight wire" pass-through if no curve is set.
        memcpy(destination, source, sizeof(float) * framesToProcess);
        return;
    }

    float* curveData = curve->data();
    int curveLength = curve->length();

    ASSERT(curveData);
    if (!curveData || !curveLength) {
        // Degenerate curve: also behave like a pass-through.
        memcpy(destination, source, sizeof(float) * framesToProcess);
        return;
    }

    // Apply waveshaping curve.
    for (unsigned i = 0; i < framesToProcess; ++i) {
        const float input = source[i];

        // Map input -1 -> +1 onto [0, curveLength - 1] so that -1 hits
        // curve[0] and +1 hits curve[curveLength - 1] exactly, per the Web
        // Audio spec. The previous scale of 0.5 * (input + 1) * curveLength
        // mapped +1 one sample past the end of the curve, and after clamping
        // both neighbor indices to the same sample the interpolation factor
        // became meaningless at the edges.
        double virtualIndex = 0.5 * (input + 1) * (curveLength - 1);

        double output;
        if (virtualIndex < 0) {
            // input < -1: saturate at the first curve sample.
            output = curveData[0];
        } else if (virtualIndex >= curveLength - 1) {
            // input >= +1: saturate at the last curve sample.
            output = curveData[curveLength - 1];
        } else {
            // -1 <= input < 1: linearly interpolate between the two nearest
            // samples on the curve.
            unsigned index1 = static_cast<unsigned>(virtualIndex);
            unsigned index2 = index1 + 1;
            double interpolationFactor = virtualIndex - index1;

            double value1 = curveData[index1];
            double value2 = curveData[index2];

            output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
        }
        destination[i] = output;
    }
}
// Writes this CData's float payload into |dest| with clamping to [0, 255].
// NaN fails both clamp comparisons, and converting NaN to an integer type is
// undefined behavior in C++, so NaN is mapped to 0 explicitly (matching
// Uint8ClampedArray conversion semantics, which send NaN to 0).
// NOTE(review): this truncates the fractional part, whereas the ECMAScript
// ToUint8Clamp operation rounds half-to-even — confirm whether exact spec
// rounding is required for this write path.
template<> void CData::writeTo<Uint8ClampedArray>(Uint8ClampedArray* dest)
{
#ifdef DIRECT_WRITE
    // Only a full-length write is supported.
    if (dest->length() != m_length)
        return;

    switch (getType()) {
    case ArrayBufferView::TypeFloat32: {
        Float32Array* src = getValue<Float32Array>();
        if (!src)
            return;

        // write with clamping; NaN (val != val) is mapped to 0 explicitly.
        uint8_t* pDest = dest->data();
        float* pSrc = src->data();
        for (size_t pos = 0; pos < m_length; pos++) {
            float val = pSrc[pos];
            pDest[pos] = (val != val) ? 0 : (val > 255) ? 255 : ((val < 0) ? 0 : val);
        }
        break;
    }
    case ArrayBufferView::TypeFloat64: {
        Float64Array* src = getValue<Float64Array>();
        if (!src)
            return;

        // write with clamping; NaN (val != val) is mapped to 0 explicitly.
        uint8_t* pDest = dest->data();
        double* pSrc = src->data();
        for (size_t pos = 0; pos < m_length; pos++) {
            double val = pSrc[pos];
            pDest[pos] = (val != val) ? 0 : (val > 255) ? 255 : ((val < 0) ? 0 : val);
        }
        break;
    }
    default:
        break;
    }
#endif // DIRECT_WRITE
}
// Copies up to aDestination.Length() samples out of channel |aChannelNumber|,
// starting at frame |aStartInChannel|. Throws IndexSizeError for a bad
// channel index, an out-of-range read, or a detached channel array. A
// channel with no backing storage reads as silence (zeros).
void AudioBuffer::CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber, uint32_t aStartInChannel, ErrorResult& aRv)
{
    // Materialize the typed array's length/data before touching either.
    aDestination.ComputeLengthAndData();

    uint32_t length = aDestination.Length();
    // Checked arithmetic guards against uint32 overflow of start + length.
    CheckedInt<uint32_t> end = aStartInChannel;
    end += length;
    if (aChannelNumber >= NumberOfChannels() || !end.isValid() || end.value() > Length()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    // No GC may run while we hold a raw pointer into a JS typed array below.
    JS::AutoCheckCannotGC nogc;
    JSObject* channelArray = mJSChannels[aChannelNumber];
    if (channelArray) {
        if (JS_GetTypedArrayLength(channelArray) != Length()) {
            // The array's buffer was detached.
            aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
            return;
        }

        bool isShared = false;
        const float* sourceData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
        // The sourceData arrays should all have originated in
        // RestoreJSChannelData, where they are created unshared.
        MOZ_ASSERT(!isShared);
        PodMove(aDestination.Data(), sourceData + aStartInChannel, length);
        return;
    }

    // No JS-visible copy of this channel; read from the shared storage if
    // present, otherwise the channel is silent.
    if (!mSharedChannels.IsNull()) {
        CopyChannelDataToFloat(mSharedChannels, aChannelNumber, aStartInChannel, aDestination.Data(), length);
        return;
    }

    PodZero(aDestination.Data(), length);
}
// Copies aSource into channel |aChannelNumber| starting at frame
// |aStartInChannel|. Throws IndexSizeError for a bad channel index, an
// out-of-range write, or a detached channel array, and OOM if the JS
// channel arrays cannot be restored.
void AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource, uint32_t aChannelNumber, uint32_t aStartInChannel, ErrorResult& aRv)
{
    // Materialize the typed array's length/data before touching either.
    aSource.ComputeLengthAndData();

    uint32_t length = aSource.Length();
    // Checked arithmetic guards against uint32 overflow of start + length.
    CheckedInt<uint32_t> end = aStartInChannel;
    end += length;
    if (aChannelNumber >= NumberOfChannels() || !end.isValid() || end.value() > mLength) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    // Make sure the channels exist as JS typed arrays before we grab a raw
    // pointer into one of them.
    if (!RestoreJSChannelData(aJSContext)) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
    }

    // No GC may run while we hold the raw pointer obtained below.
    JS::AutoCheckCannotGC nogc;
    JSObject* channelArray = mJSChannels[aChannelNumber];
    if (JS_GetTypedArrayLength(channelArray) != mLength) {
        // The array's buffer was detached.
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    bool isShared = false;
    float* channelData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
    // The channelData arrays should all have originated in
    // RestoreJSChannelData, where they are created unshared.
    MOZ_ASSERT(!isShared);
    PodMove(channelData + aStartInChannel, aSource.Data(), length);
}
// Builds a PeriodicWave from matching real/imaginary Fourier coefficient
// arrays. Throws NotSupportedError when the arrays differ in length, are
// empty, or exceed the supported table size of 4096.
already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(const Float32Array& aRealData,
                                                                const Float32Array& aImagData,
                                                                ErrorResult& aRv)
{
    const uint32_t coefficientCount = aRealData.Length();
    if (coefficientCount != aImagData.Length() || coefficientCount == 0 || coefficientCount > 4096) {
        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
        return nullptr;
    }

    nsRefPtr<PeriodicWave> wave =
        new PeriodicWave(this, aRealData.Data(), aRealData.Length(),
                         aImagData.Data(), aImagData.Length());
    return wave.forget();
}
// Builds a PeriodicWave from matching real/imaginary Fourier coefficient
// arrays, honoring the disableNormalization constraint. Throws
// NotSupportedError when the arrays differ in length or are empty, and
// propagates any failure from the PeriodicWave constructor via aRv.
already_AddRefed<PeriodicWave> AudioContext::CreatePeriodicWave(const Float32Array& aRealData,
                                                                const Float32Array& aImagData,
                                                                const PeriodicWaveConstraints& aConstraints,
                                                                ErrorResult& aRv)
{
    // Materialize both typed arrays before reading lengths or data.
    aRealData.ComputeLengthAndData();
    aImagData.ComputeLengthAndData();

    if (aRealData.Length() != aImagData.Length() || aRealData.Length() == 0) {
        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
        return nullptr;
    }

    RefPtr<PeriodicWave> wave =
        new PeriodicWave(this, aRealData.Data(), aImagData.Data(), aImagData.Length(),
                         aConstraints.mDisableNormalization, aRv);
    if (aRv.Failed()) {
        return nullptr;
    }
    return wave.forget();
}
// V8 accessor for the read-only Float32Array "length" attribute.
static v8::Handle<v8::Value> lengthAttrGetter(v8::Local<v8::String> name, const v8::AccessorInfo& info)
{
    INC_STATS("DOM.Float32Array.length._get");
    Float32Array* array = V8Float32Array::toNative(info.Holder());
    return v8::Integer::NewFromUnsigned(array->length());
}
// Renders this parameter's automation events into |values| for the time
// range [startTime, endTime), writing at most |numberOfValues| samples at
// |sampleRate|. |controlRate| governs the SetTarget time-constant
// discretization. Returns the last value written; that value is also used
// to fill any remainder of the buffer. If |values| is null, |defaultValue|
// is returned unchanged.
float AudioParamTimeline::valuesForTimeRangeImpl(
    double startTime, double endTime, float defaultValue, float* values, unsigned numberOfValues, double sampleRate, double controlRate)
{
    ASSERT(values);
    if (!values)
        return defaultValue;

    // Return default value if there are no events matching the desired time range.
    if (!m_events.size() || endTime <= m_events[0].time()) {
        for (unsigned i = 0; i < numberOfValues; ++i)
            values[i] = defaultValue;
        return defaultValue;
    }

    // Maintain a running time and index for writing the values buffer.
    double currentTime = startTime;
    unsigned writeIndex = 0;

    // If first event is after startTime then fill initial part of values buffer with defaultValue
    // until we reach the first event time.
    double firstEventTime = m_events[0].time();
    if (firstEventTime > startTime) {
        double fillToTime = min(endTime, firstEventTime);
        unsigned fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
        fillToFrame = min(fillToFrame, numberOfValues);
        for (; writeIndex < fillToFrame; ++writeIndex)
            values[writeIndex] = defaultValue;
        currentTime = fillToTime;
    }

    float value = defaultValue;

    // Go through each event and render the value buffer where the times overlap,
    // stopping when we've rendered all the requested values.
    // FIXME: could try to optimize by avoiding having to iterate starting from the very first event
    // and keeping track of a "current" event index.
    int n = m_events.size();
    for (int i = 0; i < n && writeIndex < numberOfValues; ++i) {
        ParamEvent& event = m_events[i];
        ParamEvent* nextEvent = i < n - 1 ? &(m_events[i + 1]) : 0;

        // Wait until we get a more recent event.
        if (nextEvent && nextEvent->time() < currentTime)
            continue;

        // Segment endpoints: from this event to the next one. With no next
        // event, the segment extends past endTime so it covers the rest of
        // the render range.
        float value1 = event.value();
        double time1 = event.time();
        float value2 = nextEvent ? nextEvent->value() : value1;
        double time2 = nextEvent ? nextEvent->time() : endTime + 1;

        double deltaTime = time2 - time1;
        // k is the reciprocal of the segment duration (0 for a degenerate
        // segment); used by the linear-ramp interpolation below.
        float k = deltaTime > 0 ? 1 / deltaTime : 0;
        double sampleFrameTimeIncr = 1 / sampleRate;

        double fillToTime = min(endTime, time2);
        unsigned fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
        fillToFrame = min(fillToFrame, numberOfValues);

        ParamEvent::Type nextEventType = nextEvent ? static_cast<ParamEvent::Type>(nextEvent->type()) : ParamEvent::LastType /* unknown */;

        // First handle linear and exponential ramps which require looking ahead to the next event.
        if (nextEventType == ParamEvent::LinearRampToValue) {
            for (; writeIndex < fillToFrame; ++writeIndex) {
                float x = (currentTime - time1) * k;
                value = (1 - x) * value1 + x * value2;
                values[writeIndex] = value;
                currentTime += sampleFrameTimeIncr;
            }
        } else if (nextEventType == ParamEvent::ExponentialRampToValue) {
            if (value1 <= 0 || value2 <= 0) {
                // Handle negative values error case by propagating previous value.
                for (; writeIndex < fillToFrame; ++writeIndex)
                    values[writeIndex] = value;
            } else {
                float numSampleFrames = deltaTime * sampleRate;
                // The value goes exponentially from value1 to value2 in a duration of deltaTime seconds (corresponding to numSampleFrames).
                // Compute the per-sample multiplier.
                float multiplier = powf(value2 / value1, 1 / numSampleFrames);
                // Set the starting value of the exponential ramp. This is the same as multiplier ^
                // AudioUtilities::timeToSampleFrame(currentTime - time1, sampleRate), but is more
                // accurate, especially if multiplier is close to 1.
                value = value1 * powf(value2 / value1, AudioUtilities::timeToSampleFrame(currentTime - time1, sampleRate) / numSampleFrames);
                for (; writeIndex < fillToFrame; ++writeIndex) {
                    values[writeIndex] = value;
                    value *= multiplier;
                    currentTime += sampleFrameTimeIncr;
                }
            }
        } else {
            // Handle event types not requiring looking ahead to the next event.
            switch (event.type()) {
            case ParamEvent::SetValue:
            case ParamEvent::LinearRampToValue:
            case ParamEvent::ExponentialRampToValue:
            {
                currentTime = fillToTime;

                // Simply stay at a constant value.
                value = event.value();
                for (; writeIndex < fillToFrame; ++writeIndex)
                    values[writeIndex] = value;

                break;
            }

            case ParamEvent::SetTarget:
            {
                currentTime = fillToTime;

                // Exponential approach to target value with given time constant.
                float target = event.value();
                float timeConstant = event.timeConstant();
                float discreteTimeConstant = static_cast<float>(AudioUtilities::discreteTimeConstantForSampleRate(timeConstant, controlRate));
                for (; writeIndex < fillToFrame; ++writeIndex) {
                    values[writeIndex] = value;
                    value += (target - value) * discreteTimeConstant;
                }

                break;
            }

            case ParamEvent::SetValueCurve:
            {
                Float32Array* curve = event.curve();
                float* curveData = curve ? curve->data() : 0;
                unsigned numberOfCurvePoints = curve ? curve->length() : 0;

                // Curve events have duration, so don't just use next event time.
                float duration = event.duration();
                float durationFrames = duration * sampleRate;
                float curvePointsPerFrame = static_cast<float>(numberOfCurvePoints) / durationFrames;

                if (!curve || !curveData || !numberOfCurvePoints || duration <= 0 || sampleRate <= 0) {
                    // Error condition - simply propagate previous value.
                    currentTime = fillToTime;
                    for (; writeIndex < fillToFrame; ++writeIndex)
                        values[writeIndex] = value;
                    break;
                }

                // Save old values and recalculate information based on the curve's duration
                // instead of the next event time.
                unsigned nextEventFillToFrame = fillToFrame;
                float nextEventFillToTime = fillToTime;
                fillToTime = min(endTime, time1 + duration);
                fillToFrame = AudioUtilities::timeToSampleFrame(fillToTime - startTime, sampleRate);
                fillToFrame = min(fillToFrame, numberOfValues);

                // Index into the curve data using a floating-point value.
                // We're scaling the number of curve points by the duration (see curvePointsPerFrame).
                float curveVirtualIndex = 0;
                if (time1 < currentTime) {
                    // Index somewhere in the middle of the curve data.
                    // Don't use timeToSampleFrame() since we want the exact floating-point frame.
                    float frameOffset = (currentTime - time1) * sampleRate;
                    curveVirtualIndex = curvePointsPerFrame * frameOffset;
                }

                // Render the stretched curve data using nearest neighbor sampling.
                // Oversampled curve data can be provided if smoothness is desired.
                for (; writeIndex < fillToFrame; ++writeIndex) {
                    // Ideally we'd use round() from MathExtras, but we're in a tight loop here
                    // and we're trading off precision for extra speed.
                    unsigned curveIndex = static_cast<unsigned>(0.5 + curveVirtualIndex);
                    curveVirtualIndex += curvePointsPerFrame;

                    // Bounds check.
                    if (curveIndex < numberOfCurvePoints)
                        value = curveData[curveIndex];

                    values[writeIndex] = value;
                }

                // If there's any time left after the duration of this event and the start
                // of the next, then just propagate the last value.
                for (; writeIndex < nextEventFillToFrame; ++writeIndex)
                    values[writeIndex] = value;

                // Re-adjust current time
                currentTime = nextEventFillToTime;

                break;
            }
            }
        }
    }

    // If there's any time left after processing the last event then just propagate the last value
    // to the end of the values buffer.
    for (; writeIndex < numberOfValues; ++writeIndex)
        values[writeIndex] = value;

    return value;
}