float AudioParamTimeline::valueForContextTime(AudioContext* context, float defaultValue, bool& hasValue)
{
    ASSERT(context);

    // With no context, no scheduled events, or a first event still in the
    // future, the timeline contributes nothing yet: hand back the default.
    const bool timelineInactive = !context || !m_events.size() || context->currentTime() < m_events[0].time();
    if (timelineInactive) {
        hasValue = false;
        return defaultValue;
    }

    // Sample the timeline at a single instant: a window just wider than one
    // sample-frame guarantees exactly one value is produced.
    const float rate = context->sampleRate();
    const float windowStart = narrowPrecisionToFloat(context->currentTime());
    const float windowEnd = windowStart + 1.1f / rate;
    // One automation value per render quantum.
    const float automationRate = rate / AudioNode::ProcessingSizeInFrames;

    // sampledValue doubles as the one-element output buffer for
    // valuesForTimeRange(), which also returns the sampled value.
    float sampledValue;
    sampledValue = valuesForTimeRange(windowStart, windowEnd, defaultValue, &sampledValue, 1, rate, automationRate);

    hasValue = true;
    return sampledValue;
}
// Returns the timeline's value at the context's current time, or
// |defaultValue| (with |hasValue| = false) when no event is active yet or the
// event list is momentarily unavailable.
float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
    {
        // Non-blocking probe of the event list: try_to_lock means we never
        // wait on a contended mutex here — if another thread holds it, we
        // simply report "no value" and the caller falls back to the default.
        // NOTE(review): the lock is released at the end of this scope, before
        // valuesForTimeRange() reads the events below — presumably that call
        // acquires the mutex itself; confirm to rule out a race on m_events.
        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Ask for just a single value: a window slightly wider than one
    // sample-frame yields exactly one output sample. |value| doubles as the
    // one-element output buffer; valuesForTimeRange() also returns the value.
    float value;
    double sampleRate = context.sampleRate();
    double startTime = context.currentTime();
    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
    value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);

    hasValue = true;
    return value;
}
float AudioParamTimeline::valueForContextTime(AudioContext* context, float defaultValue, bool& hasValue)
{
    ASSERT(context);

    {
        // Probe the event list without blocking: if the lock is contended,
        // the context is null, no events are scheduled, or the first event is
        // still in the future, report "no value" and return the default.
        MutexTryLocker tryLocker(m_eventsLock);
        const bool noTimelineValue = !tryLocker.locked() || !context || !m_events.size()
            || context->currentTime() < m_events[0].time();
        if (noTimelineValue) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Sample the timeline at a single instant: a window slightly wider than
    // one sample-frame produces exactly one value. sampledValue doubles as
    // the one-element output buffer; valuesForTimeRange() also returns it.
    const double rate = context->sampleRate();
    const double windowStart = context->currentTime();
    const double windowEnd = windowStart + 1.1 / rate;
    // One automation value per render quantum.
    const double automationRate = rate / AudioNode::ProcessingSizeInFrames;
    float sampledValue;
    sampledValue = valuesForTimeRange(windowStart, windowEnd, defaultValue, &sampledValue, 1, rate, automationRate);

    hasValue = true;
    return sampledValue;
}